import openai

from .base_model import BaseModel

class GPT4Model(BaseModel):
    """OpenAI API wrapper providing chat generation and text embeddings."""

    def __init__(
        self,
        generation_model="gpt-4-vision-preview",
        embedding_model="text-embedding-ada-002",
        temperature=0,
    ) -> None:
        self.generation_model = generation_model
        self.embedding_model = embedding_model
        self.temperature = temperature

    def respond(self, messages: list) -> str:
        """Request a chat completion, retrying up to three times before
        falling back to a placeholder answer."""
        for _ in range(3):
            try:
                return openai.ChatCompletion.create(
                    messages=messages,
                    model=self.generation_model,
                    temperature=self.temperature,
                    max_tokens=1000,
                ).choices[0]["message"]["content"]
            except Exception:
                # Transient API errors (rate limits, timeouts) fall through
                # to the next attempt.
                continue
        return "No answer was provided."

    def embedding(self, texts: list) -> list:
        """Embed a list of texts, batching requests at the endpoint's
        2048-inputs-per-call limit."""
        data = []
        for i in range(0, len(texts), 2048):
            # Slicing past the end of the list is safe: Python clamps the bound.
            data += openai.Embedding.create(
                input=texts[i : i + 2048],
                model=self.embedding_model,
            )["data"]
        return [d["embedding"] for d in data]
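

# A minimal usage sketch, not part of the original class: it assumes the
# legacy openai<1.0 SDK (which the ChatCompletion/Embedding calls above
# target) and an OPENAI_API_KEY set in the environment. Because of the
# relative import at the top, run this as a module from its package
# (python -m ...), not as a standalone script.
if __name__ == "__main__":
    model = GPT4Model(temperature=0)

    # Chat generation: messages follow the standard role/content schema.
    print(model.respond([{"role": "user", "content": "Say hello."}]))

    # Embeddings: one vector is returned per input text.
    vectors = model.embedding(["first document", "second document"])
    print(len(vectors), len(vectors[0]))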