from langchain_openai import ChatOpenAI
import os

# Load environment variables from a .env file when python-dotenv is available;
# fall back silently to the process environment otherwise.
try:
    from dotenv import load_dotenv
    load_dotenv()
except Exception:
    pass


def get_llm(model="gpt-4o-mini", max_tokens=1024, temperature=0.0, streaming=True, timeout=30, **kwargs):
    """Return a ChatOpenAI client configured with the app's API key and defaults."""
    llm = ChatOpenAI(
        model=model,
        api_key=os.environ.get("THEO_API_KEY", None),
        max_tokens=max_tokens,
        streaming=streaming,
        temperature=temperature,
        timeout=timeout,
        **kwargs,
    )
    return llm
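

# Minimal usage sketch (illustrative, not part of the original module): assumes
# THEO_API_KEY is set and grants access to the requested model; the prompt
# string is hypothetical. Run the module directly to try a streamed completion.
if __name__ == "__main__":
    llm = get_llm(temperature=0.2)
    for chunk in llm.stream("Summarize the retrieved sources in one sentence."):
        print(chunk.content, end="", flush=True)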