import os

import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
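
# WordSync: a small Gradio chat app where you and the AI try to guess the same word;
# past guesses are replayed each turn so the model can avoid repeats. SmolLM2 is
# queried through the Hugging Face Inference API via LangChain's
# HuggingFaceEndpoint/ChatHuggingFace wrappers.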


def respond(user_input, history):
    """Produce the model's next one-word guess for the game, streamed as it arrives."""
    # A fresh endpoint client is built on every call; fine for a demo, though it
    # could be created once at module scope to avoid repeating the setup.
    llm = HuggingFaceEndpoint(
        repo_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
        task="text-generation",
        temperature=0.5,
        repetition_penalty=1.03,
        huggingfacehub_api_token=os.getenv("HUGGING_FACE_TOKEN"),
    )
    chat = ChatHuggingFace(llm=llm, verbose=True)

    system_message = (
        "You are playing a word guessing game. Guess ONE word based on previous "
        "guesses and avoid repeats. DO NOT ANSWER WITH MORE THAN ONE WORD."
    )

    # History arrives as (human, ai) pairs; on the first turn there is none yet.
    last_human_message, last_ai_message = history[-1] if history else ("", "")

    # Start with the game rules, plus an explicit reminder of the latest exchange.
    messages = [
        SystemMessage(content=system_message),
        SystemMessage(
            content=f"Last human message: {last_human_message} | Last AI message: {last_ai_message}"
        ),
    ]

    # Replay the full conversation so the model can avoid repeating earlier guesses.
    for human, ai in history:
        messages.append(HumanMessage(content=human))
        messages.append(AIMessage(content=ai))

    messages.append(HumanMessage(content=user_input))

    # Stream the reply, yielding the accumulated text so the chat UI updates live;
    # Gradio's ChatInterface treats a generator function as a streaming response.
    response = ""
    for message in chat.stream(messages):
        # Chunks are normally AIMessageChunk objects, but tolerate plain strings too.
        if isinstance(message, str):
            response += message
        else:
            response += message.content
        yield response


demo = gr.ChatInterface(
    respond,
    title="WordSync",
    description=(
        "A word guessing game where you and the AI try to guess the same word. "
        "The AI remembers past guesses!"
    ),
)
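
# share=True makes Gradio serve a temporary public link in addition to the local
# server, so the game can be tried without deploying it anywhere.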
demo.launch(share=True)
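
# To run locally (assuming the script is saved as app.py):
#   pip install gradio langchain-huggingface
#   export HUGGING_FACE_TOKEN=hf_...   # access token read via os.getenv above
#   python app.py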