import os
import io
import time
import asyncio

import requests
from tqdm import tqdm
from fastapi import FastAPI, HTTPException, Request
import uvicorn
from llama_cpp import Llama

app = FastAPI()

# Model configuration
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-15b-Q2_K-GGUF", "filename": "starcoder2-15b-q2_k.gguf", "name": "Starcoder2 15B"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-2b-it-Q2_K-GGUF", "filename": "gemma-2-2b-it-q2_k.gguf", "name": "Gemma 2-2B IT"},
    {"repo_id": "Ffftdtd5dtft/sarvam-2b-v0.5-Q2_K-GGUF", "filename": "sarvam-2b-v0.5-q2_k.gguf", "name": "Sarvam 2B v0.5"},
    {"repo_id": "Ffftdtd5dtft/WizardLM-13B-Uncensored-Q2_K-GGUF", "filename": "wizardlm-13b-uncensored-q2_k.gguf", "name": "WizardLM 13B Uncensored"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-Math-72B-Instruct-Q2_K-GGUF", "filename": "qwen2-math-72b-instruct-q2_k.gguf", "name": "Qwen2 Math 72B Instruct"},
    {"repo_id": "Ffftdtd5dtft/WizardLM-7B-Uncensored-Q2_K-GGUF", "filename": "wizardlm-7b-uncensored-q2_k.gguf", "name": "WizardLM 7B Uncensored"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-Math-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-math-7b-instruct-q2_k.gguf", "name": "Qwen2 Math 7B Instruct"},
]


class ModelManager:
    def __init__(self):
        self.models = {}
        self.part_size = 1024 * 1024  # Size of each part in bytes (1 MB)
        # Assumed cutoff for "slow" loads. The original check
        # (load_duration > 0) was always true, which split every model.
        self.load_time_threshold = 60.0  # seconds

    async def download_model_to_memory(self, model_config):
        url = f"https://huggingface.co/{model_config['repo_id']}/resolve/main/{model_config['filename']}"
        print(f"Downloading model from {url}")
        try:
            start_time = time.time()
            # Note: the whole file is buffered in memory; GGUF models can be
            # several GB, so streaming to disk would be gentler on RAM.
            response = requests.get(url)
            response.raise_for_status()
            model_file = io.BytesIO(response.content)
            download_duration = time.time() - start_time
            print(f"Download of {model_config['name']} finished in {download_duration:.2f} seconds")
            return model_file
        except requests.RequestException as e:
            raise HTTPException(status_code=500, detail=f"Error downloading the model: {e}")

    async def save_model_to_temp_file(self, model_file, model_config):
        temp_filename = f"/tmp/{model_config['filename']}"
        print(f"Saving model to {temp_filename}")
        with open(temp_filename, 'wb') as f:
            f.write(model_file.getvalue())
        print(f"Model saved to {temp_filename}")
        return temp_filename

    async def load_model(self, model_config):
        model_file = await self.download_model_to_memory(model_config)
        temp_filename = await self.save_model_to_temp_file(model_file, model_config)
        try:
            start_time = time.time()
            print(f"Loading model from {temp_filename}")
            # llama-cpp-python loads a GGUF file through the Llama
            # constructor; there is no Llama.load() classmethod.
            llama = Llama(model_path=temp_filename)
            load_duration = time.time() - start_time
            if load_duration > self.load_time_threshold:
                print(f"Model {model_config['name']} took {load_duration:.2f} seconds to load, splitting automatically")
                await self.handle_large_model(temp_filename, model_config)
            else:
                print(f"Model {model_config['name']} loaded in {load_duration:.2f} seconds")
            # llama-cpp-python does not wrap a Hugging Face tokenizer; the
            # special tokens it exposes are the BOS/EOS ids on the model
            # itself (there are no pad/unk accessors).
            model_data = {
                'model': llama,
                'eos_token_id': llama.token_eos(),
                'bos_token_id': llama.token_bos(),
            }
            self.models[model_config['name']] = model_data
        except Exception as e:
            print(f"Error loading the model: {e}")

    async def handle_large_model(self, model_filename, model_config):
        total_size = os.path.getsize(model_filename)
        num_parts = (total_size + self.part_size - 1) // self.part_size
        print(f"Model {model_config['name']} split into {num_parts} parts")
        with open(model_filename, 'rb') as file:
            for i in tqdm(range(num_parts), desc=f"Indexing {model_config['name']}"):
                start = i * self.part_size
                end = min(start + self.part_size, total_size)
                file.seek(start)
                model_part = io.BytesIO(file.read(end - start))
                await self.index_model_part(model_part, i)

    async def index_model_part(self, model_part, part_index):
        part_name = f"part_{part_index}"
        print(f"Indexing part {part_index}")
        temp_filename = f"/tmp/{part_name}.gguf"
        with open(temp_filename, 'wb') as f:
            f.write(model_part.getvalue())
        print(f"Part {part_index} indexed and saved")

    async def generate_response(self, user_input):
        results = []
        for model_name, model_data in self.models.items():
            print(f"Generating a response with model {model_name}")
            try:
                # llama-cpp-python generates directly from the prompt string;
                # there is no Hugging Face tokenize/generate/decode cycle.
                output = model_data['model'](user_input, max_tokens=256)
                generated_text = output['choices'][0]['text']
                # Split the output into 1000-character chunks.
                parts = [generated_text[i:i + 1000] for i in range(0, len(generated_text), 1000)]
                results.append({
                    'model_name': model_name,
                    'generated_text_parts': parts,
                })
            except Exception as e:
                print(f"Error generating a response with model {model_name}: {e}")
                results.append({'model_name': model_name, 'error': str(e)})
        return results


@app.post("/generate/")
async def generate(request: Request):
    data = await request.json()
    user_input = data.get('input', '')
    if not user_input:
        raise HTTPException(status_code=400, detail="User input is required.")
    # Note: every request builds a fresh ModelManager, so all models are
    # downloaded and loaded again on each call.
    model_manager = ModelManager()
    tasks = [model_manager.load_model(config) for config in model_configs]
    await asyncio.gather(*tasks)
    responses = await model_manager.generate_response(user_input)
    return {"responses": responses}


def start_uvicorn():
    uvicorn.run(app, host="0.0.0.0", port=7860)


if __name__ == "__main__":
    # uvicorn.run() is a blocking call, not a coroutine, so it must not be
    # wrapped in asyncio.run().
    start_uvicorn()
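
# Example client call for the /generate/ endpoint (a minimal sketch, assuming
# the server above is running locally on port 7860; the prompt text is
# illustrative, not part of the original code):
#
#     import requests
#
#     resp = requests.post(
#         "http://localhost:7860/generate/",
#         json={"input": "Explain what a GGUF file is."},
#     )
#     for item in resp.json()["responses"]:
#         print(item["model_name"], item.get("generated_text_parts"))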