import gradio as gr  # Only used by the commented-out Gradio demo below
# from transformers import pipeline

# Earlier Gradio image-classification demo, kept for reference:
"""
pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict(image):
    predictions = pipeline(image)
    return {p["label"]: p["score"] for p in predictions}

gr.Interface(
    predict,
    inputs=gr.Image(label="Upload hot dog candidate", type="filepath"),
    outputs=gr.Label(num_top_classes=2),
    title="Hot Dog? Or Not?",
).launch()
"""

from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the pre-trained model and tokenizer once at startup instead of on every call
model_name = "your_pretrained_model_name"  # Replace with the name of the pre-trained model you want to use
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def chatbot_response(user_message):
    # Tokenize the user's message and generate a response
    inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
    outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
    # Decode only the newly generated tokens; outputs[0] also contains the prompt
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return response


if __name__ == '__main__':
    print("Chatbot: Hello! I'm your chatbot. Type 'exit' to end the conversation.")
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            break
        response = chatbot_response(user_input)
        print("Chatbot:", response)
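
# Optional: the Gradio import at the top of this file suggests a web UI was
# intended. Below is a minimal, commented-out sketch of wrapping
# chatbot_response in Gradio's Interface API; the component labels and title
# are illustrative assumptions, not part of the original script. Uncomment to
# serve the chatbot in the browser instead of the CLI loop above.
#
# demo = gr.Interface(
#     fn=chatbot_response,
#     inputs=gr.Textbox(label="Your message"),
#     outputs=gr.Textbox(label="Chatbot reply"),
#     title="Transformers Chatbot",
# )
# demo.launch()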