|
|
|
|
|
|
|
import os

from openai import AsyncOpenAI

import chainlit as cl

from chainlit.prompt import Prompt, PromptMessage

from chainlit.playground.providers import ChatOpenAI

from dotenv import load_dotenv

# Load environment variables (e.g. OPENAI_API_KEY for AsyncOpenAI) from a
# local .env file before any client is constructed.
# NOTE(review): `os` appears unused in this file — confirm before removing.
load_dotenv()
|
|
|
|
|
# System prompt sent with every request: frames the model as an ML/DL expert
# and gives one example per question category (explanation, code, resource
# links, project steps) so replies follow the matching format.
system_template = """\

###Instruction###

You are an expert assistant answering technical questions on machine learning and deep learning subject.

Ensure that your response is unbiased and generic, you will be 'AWARDED' for giving really good clarity and correct answers.

##EXAMPLES##

If users asks 'explain neural networks', your response should be with an overview of neural networks, discussing how they are computational models inspired by the human brain that are used to recognize patterns and solve complex problems in machine learning.

If users ask 'code convolutional neural network', your response should contain example of the code necessary to create a CNN.

If users ask 'resource gradient descent', your response should offer links to tutorials, video lectures, or articles that explain gradient descent, which is an optimization algorithm used to minimize a function by iteratively moving in the direction of steepest descent.

If users ask 'project sentiment analysis', your response should discuss the steps involved in creating a sentiment analysis model, such as data collection, preprocessing, model selection, training, and evaluation, and potentially offer advice on best practices or methodologies to consider.

"""
|
|
|
# User prompt wrapper: `{input}` is filled with the raw chat message via
# str.format, followed by an instruction steering the answer toward one of
# the four response categories defined in the system prompt.
user_template = """{input} \n +

Think and give only explanation or code or links for resources or steps for project, for the questions asked along with response.

"""
|
|
|
|
|
@cl.on_chat_start
async def start_chat():
    """Initialize the per-session model settings when a new chat opens.

    Stores a deterministic (temperature 0), length-bounded configuration
    for gpt-3.5-turbo in the Chainlit user session under "settings".
    """
    model_settings = dict(
        model="gpt-3.5-turbo",
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    cl.user_session.set("settings", model_settings)
|
|
|
@cl.on_message
async def main(message: cl.Message):
    """Answer an incoming chat message by streaming an OpenAI completion.

    Builds a system/user prompt pair from the module templates, streams the
    model's reply token-by-token into a Chainlit message, then attaches the
    Prompt (with its completion) so the exchange is inspectable in the
    prompt playground.

    Args:
        message: The incoming Chainlit user message; its content fills the
            ``{input}`` slot of ``user_template``.
    """
    # Defensive fallback: the session value is None if on_chat_start never
    # ran (e.g. restored session), which would crash the **settings expansion
    # below. Defaults mirror those set in start_chat.
    settings = cl.user_session.get("settings") or {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    # AsyncOpenAI reads OPENAI_API_KEY from the environment (loaded via
    # load_dotenv at import time).
    client = AsyncOpenAI()

    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                # The system prompt has no placeholders, so the formatted
                # text is the template itself.
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message.content),
            ),
        ],
        inputs={"input": message.content},
        settings=settings,
    )

    # Send an empty message first so tokens can be streamed into it.
    msg = cl.Message(content="")
    await msg.send()

    stream = await client.chat.completions.create(
        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
    )
    async for chunk in stream:
        # delta.content is None on role/finish chunks; skip empty deltas.
        token = chunk.choices[0].delta.content or ""
        if token:
            await msg.stream_token(token)

    # Attach the full completion and prompt metadata, then update the
    # already-sent streamed message rather than re-sending it (re-sending
    # can duplicate the message; update() persists in place).
    prompt.completion = msg.content
    msg.prompt = prompt
    await msg.update()
|
|