import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from gtts import gTTS
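# Assumed dependencies: gradio, transformers (with a PyTorch backend), and
# gTTS, e.g. `pip install gradio transformers torch gTTS`.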

# Load the AgriQBot model from Hugging Face using the transformers library
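# AutoModelForSeq2SeqLM loads an encoder-decoder model, so AgriQBot answers
# each question independently rather than maintaining multi-turn chat state.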
tokenizer = AutoTokenizer.from_pretrained("mrSoul7766/AgriQBot")
model = AutoModelForSeq2SeqLM.from_pretrained("mrSoul7766/AgriQBot")

def respond(
    message,
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    """
    Respond to a user query using the AgriQBot model.

    The model is an encoder-decoder network that answers one question at a
    time, and the Gradio interface below passes only the current textbox
    value, so no chat history is threaded through.

    Args:
    - message: User query (string).
    - max_tokens: Maximum length of the generated response, in tokens.
    - temperature: Controls randomness of sampling.
    - top_p: Controls diversity via nucleus sampling.

    Returns:
    - Response string as the chatbot's answer.
    """
    # Tokenize the question and generate an answer. do_sample=True is needed
    # for temperature and top_p to take effect; without it, generate() decodes
    # greedily and silently ignores both settings.
    inputs = tokenizer(message, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(
        **inputs,
        max_length=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # Decode the generated tokens into the plain-text answer.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
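
# Quick local sanity check (illustrative; the actual output depends on the
# model weights):
#   print(respond("How can I improve soil fertility?"))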

def text_to_voice(response, path="response.mp3"):
    """
    Convert the response text to speech using Google Text-to-Speech.

    Args:
    - response: Text response from the model to be converted to speech.
    - path: Where to save the generated MP3.

    Returns:
    - The path to the saved MP3, which Gradio's Audio component can play
      directly (no OS-specific media-player call is needed).
    """
    # gTTS sends the text to Google's TTS endpoint, so this needs network access.
    tts = gTTS(text=response, lang="en")
    tts.save(path)
    return path
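
def answer_with_audio(message):
    """
    Small glue handler (added here, not part of the original model code):
    produces both the text answer and its spoken version, matching the two
    output components of the interface below.
    """
    answer = respond(message)
    return answer, text_to_voice(answer)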

# Build the Gradio interface. The handler returns two values (answer text and
# an audio file path), one for each output component below.
demo = gr.Interface(
    fn=answer_with_audio,
    inputs=gr.Textbox(label="Enter your question about farming:"),
    outputs=[
        gr.Textbox(label="Chatbot Response"),
        gr.Audio(label="Audio Response"),
    ],
    title="Farming Assistant Chatbot",
    description="Ask questions about farming, crop management, pest control, soil conditions, and best agricultural practices.",
)

# Launch the interface
if __name__ == "__main__":
    demo.launch()
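
# Tip: demo.launch(share=True) would also serve a temporary public URL when
# running locally.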