import gradio as gr
from gtts import gTTS
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the AgriQBot model from Hugging Face using the transformers library
tokenizer = AutoTokenizer.from_pretrained("mrSoul7766/AgriQBot")
model = AutoModelForSeq2SeqLM.from_pretrained("mrSoul7766/AgriQBot")
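# Optional sanity check (a sketch for local debugging only, not part of the app flow):
# the loaded seq2seq model answers plain-text questions directly, e.g.
#   sample = tokenizer("How do I control aphids on tomatoes?", return_tensors="pt")
#   print(tokenizer.decode(model.generate(**sample, max_new_tokens=64)[0], skip_special_tokens=True))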
def respond(
    message,
    history=None,   # list of previous (user, assistant) message pairs
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    """
    Respond to user queries using the AgriQBot model.

    Args:
        message: User query (string).
        history: List of previous (user, assistant) message pairs (default is None).
        max_tokens: Maximum length of the generated response, in tokens.
        temperature: Controls randomness of the response.
        top_p: Controls diversity of the response (nucleus sampling).

    Returns:
        Response string with the chatbot's answer.
    """
    if history is None:
        history = []  # Initialize history to an empty list if None

    # AgriQBot is a seq2seq model without a documented chat template, so the
    # system instruction and any previous turns are flattened into plain text.
    prompt = "You are a friendly farming assistant. Answer the user's questions related to farming.\n"
    for user_msg, assistant_msg in history:
        if user_msg:
            prompt += f"User: {user_msg}\n"
        if assistant_msg:
            prompt += f"Assistant: {assistant_msg}\n"
    # Append the current user message
    prompt += f"User: {message}"

    # Tokenize the input and generate the response
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(
        **inputs,
        max_length=max_tokens,
        do_sample=True,  # sampling must be enabled for temperature/top_p to take effect
        temperature=temperature,
        top_p=top_p,
    )

    # Decode the response and return it
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
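# Example (not executed here): respond("Which cover crops add nitrogen to the soil?")
# returns a single answer string generated by AgriQBot.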
def text_to_voice(response):
    """
    Convert the response text to speech using Google Text-to-Speech.

    Args:
        response: Text response from the model to be converted to speech.

    Returns:
        Path to the saved MP3 file, so the Gradio Audio component can play it in the browser.
    """
    tts = gTTS(text=response, lang="en")
    audio_path = "response.mp3"
    tts.save(audio_path)
    return audio_path
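# Example (not executed here): text_to_voice("Rotate crops to break pest cycles.")
# writes response.mp3 and returns its path. Note that gTTS calls Google's online
# TTS service, so the Space needs outbound internet access.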
def chat_and_speak(message):
    """Wrapper for the Gradio interface: returns both the text answer and the audio file."""
    response = respond(message)
    audio_path = text_to_voice(response)
    return response, audio_path


# Build the Gradio interface
demo = gr.Interface(
    fn=chat_and_speak,
    inputs=[
        gr.Textbox(label="Enter your question about farming:"),
    ],
    outputs=[
        gr.Textbox(label="Chatbot Response"),
        gr.Audio(label="Audio Response", type="filepath"),
    ],
    title="Farming Assistant Chatbot",
    description="Ask questions about farming, crop management, pest control, soil conditions, and best agricultural practices.",
)
# Launch the interface
if __name__ == "__main__":
    demo.launch()
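# Suggested requirements.txt for this Space (package list is an assumption; pin versions as needed):
#   gradio
#   transformers
#   torch
#   sentencepiece
#   gTTS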