willbraun committed on
Commit 6486f3f
1 Parent(s): 9763456

got chatbot working

Files changed (4)
  1. .gitignore +1 -0
  2. __pycache__/app.cpython-312.pyc +0 -0
  3. app.py +54 -27
  4. requirements.txt +1 -0
.gitignore CHANGED
@@ -0,0 +1 @@
+ .env
__pycache__/app.cpython-312.pyc ADDED
Binary file (2.23 kB).
 
app.py CHANGED
@@ -1,31 +1,58 @@
+ import os
  import gradio as gr
- from langchain_huggingface import HuggingFacePipeline
-
- llm = HuggingFacePipeline.from_model_id(
-     model_id="openai-community/gpt2",
-     task="text-generation",
-     pipeline_kwargs={
-         "max_new_tokens": 5,
-         "temperature": 1,
-     },
- )
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- # Define a function to generate responses
- def chat_with_model(user_input):
-     print(llm.invoke(user_input))
-     return llm.invoke(user_input)
-
- # Create a Gradio interface
- demo = gr.Interface(
-     fn=chat_with_model,
-     inputs="text",
-     outputs="text",
-     title="Chat with GPT-2",
-     description="Enter a prompt and get a response from GPT-2."
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+
+ # Function to handle responses
+ def respond(user_input, history):
+     # Configure Hugging Face model endpoint
+     llm = HuggingFaceEndpoint(
+         repo_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
+         task="text-generation",
+         temperature=0.5,
+         repetition_penalty=1.03,
+         huggingfacehub_api_token=os.getenv('HUGGING_FACE_TOKEN')
+     )
+     chat = ChatHuggingFace(llm=llm, verbose=True)
+
+     # Prepare system message to set the context
+     system_message = "You are playing a word guessing game. Guess ONE word based on previous guesses and avoid repeats. DO NOT ANSWER WITH MORE THAN ONE WORD."
+
+     # Track guessed words from history
+     last_human_message, last_ai_message = history[-1] if history else ("", "")
+     # guessed_words_summary = "Past guesses: " + ", ".join() if guessed_words else "No past guesses."
+     # print(guessed_words_summary)
+     # Add messages to the conversation
+     messages = [
+         SystemMessage(content=system_message),
+         SystemMessage(content="Last human message: " + last_human_message + " | Last AI message: " + last_ai_message),
+     ]
+
+     # Load past conversation into messages
+     for human, ai in history:
+         messages.append(HumanMessage(content=human))
+         messages.append(AIMessage(content=ai))
+
+     # Add the current user input
+     messages.append(HumanMessage(content=user_input))
+
+     # Initialize response as an empty string
+     response = ""
+
+     # Stream response from the model
+     for message in chat.stream(messages):
+         if isinstance(message, str):
+             response += message
+         else:
+             response += message.content
+
+         yield response
+
+ # Set up the Gradio interface
+ demo = gr.ChatInterface(
+     respond,
+     title="WordSync",
+     description="A word guessing game where you and the AI try to guess the same word. The AI remembers past guesses!"
  )

- # Launch the interface
- demo.launch(share=True)
+ demo.launch(share=True)
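
For reference, the HuggingFaceEndpoint + ChatHuggingFace streaming pattern used above can be exercised outside Gradio. A minimal sketch (a hypothetical test script, not part of this commit), assuming HUGGING_FACE_TOKEN is already exported in the environment:

# Hypothetical smoke test for the streaming setup used in app.py above.
# Assumes HUGGING_FACE_TOKEN is set in the environment.
import os

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
    task="text-generation",
    temperature=0.5,
    repetition_penalty=1.03,
    huggingfacehub_api_token=os.getenv("HUGGING_FACE_TOKEN"),
)
chat = ChatHuggingFace(llm=llm)

messages = [
    SystemMessage(content="Answer with ONE word."),
    HumanMessage(content="apple"),
]

# chat.stream() yields message chunks; printing chunk.content as it arrives
# mirrors how respond() accumulates partial responses and yields them.
for chunk in chat.stream(messages):
    print(chunk.content, end="", flush=True)
print()

Running this should print the model's reply chunk by chunk, the same pieces that respond() concatenates into the partial strings it yields to gr.ChatInterface.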
 
requirements.txt CHANGED
@@ -1,2 +1,3 @@
  gradio
+ langchain-core
  langchain-huggingface
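
The new .env entry in .gitignore and the os.getenv('HUGGING_FACE_TOKEN') call in app.py imply the token is supplied via the environment, presumably a Space secret or a local .env file. A minimal local-setup sketch using python-dotenv (an assumption; python-dotenv is not listed in requirements.txt):

# Hypothetical local setup, not part of this commit: python-dotenv loads the
# gitignored .env file so os.getenv("HUGGING_FACE_TOKEN") can find the token.
import os

from dotenv import load_dotenv  # assumes python-dotenv is installed locally

load_dotenv()  # reads KEY=VALUE pairs from .env into the process environment
if os.getenv("HUGGING_FACE_TOKEN") is None:
    raise RuntimeError("HUGGING_FACE_TOKEN is not set; add it to .env")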