Manojajj committed on
Commit
0f347bb
·
verified ·
1 Parent(s): 271f3a2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -7
app.py CHANGED
@@ -1,21 +1,32 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
 
 
 
4
  # Function to interact with the model using the Inference API
5
  def chat_with_model(user_input, hf_api_key):
 
 
6
  # Initialize the InferenceClient with the provided API key
7
  client = InferenceClient(api_key=hf_api_key)
8
 
9
- # Define the messages for the chat (system message tailored for a code assistant)
10
- messages = [
11
- {"role": "system", "content": "You are a code assistant that helps with code generation, debugging, and explanations."},
12
- {"role": "user", "content": user_input}
13
- ]
 
 
 
 
 
 
14
 
15
  # Create a stream for chat completions using the API
16
  stream = client.chat.completions.create(
17
  model="Qwen/Qwen2.5-Coder-32B-Instruct",
18
- messages=messages,
19
  max_tokens=500,
20
  stream=True
21
  )
@@ -24,6 +35,10 @@ def chat_with_model(user_input, hf_api_key):
24
  response = ""
25
  for chunk in stream:
26
  response += chunk.choices[0].delta.content
 
 
 
 
27
  return response
28
 
29
  # Create the Gradio interface
@@ -39,4 +54,4 @@ iface = gr.Interface(
39
  )
40
 
41
  # Launch the interface
42
- iface.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
# Process-wide chat transcript shared by every call to chat_with_model;
# each entry is a {"role": ..., "content": ...} message dict.
conversation_history = []
7
# Function to interact with the model using the Inference API
def chat_with_model(user_input, hf_api_key):
    """Send ``user_input`` to the model and return the assistant's reply.

    Keeps a module-level conversation history so follow-up calls retain
    context. NOTE(review): the history is a process-wide global, so all
    concurrent Gradio users share one conversation — consider per-session
    state if that matters.

    Args:
        user_input: The user's chat message for this turn.
        hf_api_key: Hugging Face API token used to authenticate requests.

    Returns:
        The assistant's complete response text, assembled from the stream.
    """
    global conversation_history

    # Initialize the InferenceClient with the provided API key
    client = InferenceClient(api_key=hf_api_key)

    # Define the system message (defining the assistant role)
    system_message = {
        "role": "system",
        "content": "You are a code assistant that helps with code generation, debugging, and explanations.",
    }

    # Ensure the system message is present exactly once, at the front.
    # (The previous unconditional insert(0, ...) duplicated it on every call.)
    if not conversation_history or conversation_history[0].get("role") != "system":
        conversation_history.insert(0, system_message)

    # Add the user's message to the conversation history
    conversation_history.append({"role": "user", "content": user_input})

    # Create a stream for chat completions using the API
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=conversation_history,
        max_tokens=500,
        stream=True,
    )

    # Accumulate streamed chunks. A chunk's delta content can be None
    # (e.g. the terminating chunk), which would raise a TypeError if
    # concatenated directly — guard before appending.
    response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            response += delta

    # Record the assistant's reply so the next turn has full context
    conversation_history.append({"role": "assistant", "content": response})

    return response
43
 
44
  # Create the Gradio interface
 
54
  )
55
 
56
# Launch the interface
# Guard so the web server only starts when the file is run as a script,
# not when the module is merely imported.
if __name__ == "__main__":
    iface.launch()