# Setup: pip install transformers torch accelerate gradio

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

# Load the model and tokenizer directly. The 70B checkpoint is gated and
# requires substantial GPU memory; device_map="auto" (via accelerate) spreads
# the weights across the available devices. A text-generation pipeline would
# wrap the same model, so there is no need to load it twice.
MODEL_ID = "meta-llama/Meta-Llama-3.1-70B"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto")

# Sentiment-analysis pipeline (defaults to an English DistilBERT checkpoint
# whose labels are POSITIVE / NEGATIVE)
sentiment_analyzer = pipeline("sentiment-analysis")
# Running conversation history, shared across turns
context = []

def predict(context, input_text):
    """Generate a response conditioned on the accumulated context."""
    context.append(input_text)
    inputs = tokenizer(" ".join(context), return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs.input_ids,
        max_new_tokens=200,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Slice off the prompt so only the newly generated tokens are decoded,
    # otherwise the echoed input ends up in the reply (and in the context)
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
    )
    context.append(response)
    return response
def predict_with_emotion(context, input_text):
    """Generate a response, prefixed according to the detected sentiment."""
    sentiment = sentiment_analyzer(input_text)[0]["label"]
    response = predict(context, input_text)
    if sentiment == "NEGATIVE":
        response = "I'm sorry to hear that. " + response
    elif sentiment == "POSITIVE":
        response = "That's great! " + response
    return response
def chatbot(input_text):
    """Entry point for the Gradio interface."""
    global context
    return predict_with_emotion(context, input_text)

# Create and launch the Gradio interface
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Contextual Emotion-Aware LLaMA-70B Chatbot",
)
iface.launch()
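
# Quick smoke test of the chat logic without the web UI (a sketch; assumes
# the weights above loaded, and that iface.launch() is skipped, since it
# blocks until the server stops). For local experiments a smaller checkpoint
# such as meta-llama/Llama-3.1-8B is a common stand-in:
#
#     for turn in ["I just got a promotion!", "My laptop crashed again."]:
#         print("User:", turn)
#         print("Bot:", chatbot(turn))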