import logging

import torch
from fastapi import FastAPI

from .vector_db import VectorDB
from .open_ai_connector import OpenAIConnector
from .parameters import *  # supplies emb_model, db_location, full_actions_list_file_path, num_sub_vectors, batch_size, K

app = FastAPI()

logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Build the embedding index and the OpenAI client once at import time so
# every request reuses them.
vector_db = VectorDB(emb_model, db_location, full_actions_list_file_path, num_sub_vectors, batch_size)
open_ai_connector = OpenAIConnector()
@app.get('/find_action')  # assumed route path
async def find_action(query: str):
    logger.info('find_action started: %s', query)
    # Narrow the full action list to the K nearest neighbours of the query.
    prefiltered_names, prefiltered_descriptions = vector_db.retrieve_prefiltered_hits(query, K)
    logger.info('prefiltered hits retrieved')
    # Ask the LLM to pick the best action from the prefiltered candidates.
    logger.info('querying OpenAI')
    response = open_ai_connector.query_open_ai(query, prefiltered_names, prefiltered_descriptions)
    logger.info('OpenAI response: %s', response)
    logger.info('find_action finished')
    return {'success': True, 'query': query, 'response': response}
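# A minimal usage sketch, assuming the GET route above: exercise the endpoint
# in-process with FastAPI's TestClient instead of a running server. The query
# string is illustrative.
#
#     from fastapi.testclient import TestClient
#     client = TestClient(app)
#     resp = client.get('/find_action', params={'query': 'turn on the lights'})
#     print(resp.json())  # {'success': True, 'query': ..., 'response': ...}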
@app.get('/gpu_check')  # assumed route path
async def gpu_check():
    # Report whether torch can see a CUDA device.
    if torch.cuda.is_available():
        gpu = 'GPU is available'
    else:
        gpu = 'GPU is not available'
    logger.info(gpu)
    return {'success': True, 'response': 'hello world 3', 'gpu': gpu}
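# A minimal local entry point, a sketch assuming uvicorn (FastAPI's usual
# ASGI server) is installed; host and port are illustrative defaults.
if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, host='0.0.0.0', port=8000)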