teaevo committed on
Commit
23432db
·
1 Parent(s): 43d191a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -45
app.py CHANGED
@@ -1,58 +1,28 @@
1
  import gradio as gr
2
- #from transformers import pipeline
3
-
4
- """
5
- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
6
-
7
- def predict(image):
8
- predictions = pipeline(image)
9
- return {p["label"]: p["score"] for p in predictions}
10
-
11
- gr.Interface(
12
- predict,
13
- inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
14
- outputs=gr.outputs.Label(num_top_classes=2),
15
- title="Hot Dog? Or Not?",
16
- ).launch()
17
- """
18
-
19
  from transformers import AutoModelForCausalLM, AutoTokenizer
20
 
21
  def chatbot_response(user_message):
22
- # Load the pre-trained model and tokenizer
23
- model_name = "gpt2" # Replace with the name of the pre-trained model you want to use
24
  tokenizer = AutoTokenizer.from_pretrained(model_name)
25
  model = AutoModelForCausalLM.from_pretrained(model_name)
26
 
27
- # Tokenize the user's message and generate the response
28
  inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
29
  outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
30
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
31
 
32
  return response
33
 
34
- user_input = input("You: ")
35
-
36
- gr.Interface(
37
- chatbot_response,
38
- inputs=user_input,
39
- outputs=gr.outputs.Label(num_top_classes=2),
40
- title="NLP Test",
41
- ).launch()
42
-
43
- """
44
- if __name__ == '__main__':
45
- print("Chatbot: Hello! I'm your chatbot. Type 'exit' to end the conversation.")
46
-
47
- while True:
48
- user_input = input("You: ")
49
-
50
- if user_input.lower() == 'exit':
51
- break
52
-
53
- response = chatbot_response(user_input)
54
- print("Chatbot:", response)
55
-
56
- """
57
-
58
-
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
  def chatbot_response(user_message):
5
+ model_name = "gpt2" # You can change this to any other model from the list above
 
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
 
9
  inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
10
  outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
11
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
12
 
13
  return response
14
 
15
+ # Define the chatbot interface using Gradio
16
+ iface = gr.Interface(
17
+ fn=chatbot_response,
18
+ inputs=gr.Textbox(prompt="You:"),
19
+ outputs=gr.Textbox(),
20
+ live=True,
21
+ capture_session=True,
22
+ title="Chatbot",
23
+ description="Type your message in the box above, and the chatbot will respond.",
24
+ )
25
+
26
+ # Launch the Gradio interface
27
+ if __name__ == "__main__":
28
+ iface.launch()