Hastika committed
Commit 0e89de4 · verified · 1 Parent(s): 6493831

Update app.py

Files changed (1)
  1. app.py +41 -1
app.py CHANGED
@@ -1,3 +1,43 @@
 import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
-gr.load("models/codellama/CodeLlama-34b-Instruct-hf").launch()
+# Load the model and tokenizer
+model_name = "codellama/CodeLlama-34b-Instruct-hf"  # Hub repo id, without the "models/" prefix used by gr.load
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
+
+# Create a text-generation pipeline for chatbot interaction
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# Handle one chatbot turn
+def chatbot_interaction(user_input, history=None):
+    # history holds (user, assistant) pairs, the format gr.Chatbot expects
+    history = list(history or [])
+
+    # Format the conversation history plus the new message as a plain-text prompt
+    conversation = "\n".join([f"user: {u}\nassistant: {a}" for u, a in history] + [f"user: {user_input}"])
+
+    # Generate a response from the model
+    response = pipe(conversation, max_new_tokens=256, do_sample=True)[0]["generated_text"]
+
+    # Keep only the newly generated text and append the (user, assistant) turn
+    assistant_message = response[len(conversation):].strip()
+    history.append((user_input, assistant_message))
+
+    return history, history
+
+# Gradio interface
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox(label="Your message:")
+    clear = gr.Button("Clear")
+
+    def respond(message, chat_history):
+        updated_history, _ = chatbot_interaction(message, chat_history)
+        return updated_history, ""
+
+    msg.submit(respond, [msg, chatbot], [chatbot, msg])
+    clear.click(lambda: None, None, chatbot)  # Clear the chat
+
+# Launch the Gradio app
+demo.launch()
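
Note: as a quick sanity check outside the Gradio UI, a minimal sketch along these lines could exercise chatbot_interaction directly once the model has loaded; the prompt string and the empty starting history are illustrative assumptions, not part of the commit.

# Hypothetical smoke test, run in the same process as app.py after its definitions execute
history, _ = chatbot_interaction("Write a Python function that reverses a string.", [])
for user_msg, assistant_msg in history:
    print("User:", user_msg)
    print("Assistant:", assistant_msg)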