|
import gradio as gr |
|
|
|
|
|
""" |
|
pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog") |
|
|
|
def predict(image): |
|
predictions = pipeline(image) |
|
return {p["label"]: p["score"] for p in predictions} |
|
|
|
gr.Interface( |
|
predict, |
|
inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"), |
|
outputs=gr.outputs.Label(num_top_classes=2), |
|
title="Hot Dog? Or Not?", |
|
).launch() |
|
""" |
|
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
def chatbot_response(user_message: str) -> str:
    """Generate a single chatbot reply for *user_message*.

    Prefixes the message with ``"User: "``, runs greedy generation on a
    causal language model, and returns the decoded text (which includes
    the prompt, since the full output sequence is decoded).

    Args:
        user_message: The raw text typed by the user.

    Returns:
        The decoded model output (special tokens stripped).
    """
    # TODO(review): placeholder — replace with a real Hub model id before use.
    model_name = "your_pretrained_model_name"

    # Bug fix: the original reloaded the tokenizer and model from disk/Hub on
    # EVERY call, re-initializing the full weights per message. Load once and
    # cache on the function object so subsequent calls reuse them.
    if not hasattr(chatbot_response, "_cache"):
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        chatbot_response._cache = (tokenizer, model)
    tokenizer, model = chatbot_response._cache

    # Encode the prompt, generate up to 100 tokens, decode the single sequence.
    inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
    outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return response
|
|
|
if __name__ == '__main__':
    # Minimal console REPL: greet once, then echo model replies until the
    # user types the sentinel word "exit" (case-insensitive).
    print("Chatbot: Hello! I'm your chatbot. Type 'exit' to end the conversation.")

    chatting = True
    while chatting:
        user_input = input("You: ")

        if user_input.lower() == 'exit':
            chatting = False
        else:
            print("Chatbot:", chatbot_response(user_input))
|
|
|
|