# Random responder: replies "Yes" or "No" at random, ignoring the message.
import random

def random_response(message, history):
    return random.choice(["Yes", "No"])
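# A minimal way to serve this function as a chat UI (a sketch, left commented
# out so the rest of this file still runs; gr.ChatInterface wraps any
# (message, history) -> str function):
# import gradio as gr
# gr.ChatInterface(random_response).launch()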
# Yes-man bot: answers "Yes" to anything ending in "?", otherwise prompts
# the user to ask a question.
import time
import gradio as gr

def yes_man(message, history):
    if message.endswith("?"):
        return "Yes"
    else:
        return "Ask me anything!"
# Streaming echo bot: yields the response one character at a time; the two
# extra parameters are supplied through ChatInterface's additional_inputs.
def echo(message, history, system_prompt, tokens):
    response = f"System prompt: {system_prompt}\n Message: {message}."
    for i in range(min(len(response), int(tokens))):
        time.sleep(0.05)
        yield response[: i + 1]
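# Wiring echo's extra parameters to UI controls (a sketch; the Textbox
# default text and the Slider range are illustrative assumptions, not values
# from this file):
# gr.ChatInterface(
#     echo,
#     additional_inputs=[
#         gr.Textbox("You are a helpful AI.", label="System Prompt"),
#         gr.Slider(10, 100, label="Tokens"),
#     ],
# ).launch()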
# LangChain + OpenAI chatbot: convert Gradio's (human, ai) history pairs
# into LangChain message objects, then query the model. The openai package
# is used by LangChain internally, so it is not imported directly here.
import os

import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

os.environ["OPENAI_API_KEY"] = "sk-..."  # Replace with your key
llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
def predict(message, history):
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    gpt_response = llm(history_langchain_format)
    return gpt_response.content
gr.ChatInterface(predict).launch()
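# A streaming variant (sketch; predict_stream is a hypothetical helper):
# yield growing prefixes of the reply so the UI renders it incrementally,
# mirroring the echo example above.
# def predict_stream(message, history):
#     full = predict(message, history)
#     for i in range(len(full)):
#         yield full[: i + 1]
# gr.ChatInterface(predict_stream).launch()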