teaevo committed on
Commit
f24bed6
·
1 Parent(s): ec9ef8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -2
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
 
4
  pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
5
 
6
  def predict(image):
@@ -12,4 +13,33 @@ gr.Interface(
12
  inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
13
  outputs=gr.outputs.Label(num_top_classes=2),
14
  title="Hot Dog? Or Not?",
15
- ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ #from transformers import pipeline
3
 
4
+ """
5
  pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
6
 
7
  def predict(image):
 
13
  inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
14
  outputs=gr.outputs.Label(num_top_classes=2),
15
  title="Hot Dog? Or Not?",
16
+ ).launch()
17
+ """
18
+
19
+ from transformers import AutoModelForCausalLM, AutoTokenizer
20
+
21
+ def chatbot_response(user_message):
22
+ # Load the pre-trained model and tokenizer
23
+ model_name = "your_pretrained_model_name" # Replace with the name of the pre-trained model you want to use
24
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
25
+ model = AutoModelForCausalLM.from_pretrained(model_name)
26
+
27
+ # Tokenize the user's message and generate the response
28
+ inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
29
+ outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
30
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
31
+
32
+ return response
33
+
34
+ if __name__ == '__main__':
35
+ print("Chatbot: Hello! I'm your chatbot. Type 'exit' to end the conversation.")
36
+
37
+ while True:
38
+ user_input = input("You: ")
39
+
40
+ if user_input.lower() == 'exit':
41
+ break
42
+
43
+ response = chatbot_response(user_input)
44
+ print("Chatbot:", response)
45
+