# Running fine :)
import gradio as gr
import os
from langchain_huggingface import HuggingFaceEndpoint
from dotenv import load_dotenv
from langchain_community.document_loaders import WhatsAppChatLoader
# Load environment variables from a local .env file
load_dotenv()

# Get the Hugging Face API token
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize the Hugging Face inference endpoint
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN,
    temperature=0.7,
    max_new_tokens=300,
)
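# Note (an assumption, not stated in the original file): HuggingFaceEndpoint
# sends requests to the hosted Hugging Face Inference API, so the repo_id above
# must be served there and reachable with HF_TOKEN. Mistral-7B-Instruct-v0.3 is
# a gated model, so the token's account likely needs to have accepted its terms.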
# Load and flatten the uploaded chat export into a single string
def load_chat_content(file) -> str:
    # gr.File returns a tempfile-like object with a .name attribute in older
    # Gradio versions and a plain path string in newer ones; handle both
    path = file if isinstance(file, str) else file.name
    loader = WhatsAppChatLoader(path=path)
    messages = list(loader.lazy_load())
    # Combine all parsed messages into one newline-separated string
    return "\n".join(doc.page_content for doc in messages)
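# For reference (a hedged example, not from the original file): WhatsAppChatLoader
# expects a plain-text WhatsApp export, where each message looks roughly like
#   1/23/23, 3:45 PM - Alice: See you tomorrow!
# Exact date formats vary by locale, so a chat that parses to zero messages
# usually means the export format was not recognized.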
def answer_question(file, question: str) -> str:
    # Load the chat content from the uploaded file
    chat_content = load_chat_content(file)
    # Build a single prompt that grounds the answer in the transcript
    prompt = (
        "Answer the question using only the WhatsApp chat transcript below.\n\n"
        f"Transcript:\n{chat_content}\n\n"
        f"Question: {question}\nAnswer:"
    )
    # Calling llm(...) directly is deprecated in recent LangChain releases;
    # invoke() is the supported call and returns the generated text as a string
    return llm.invoke(prompt)
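# Optional sketch (an assumption, not in the original): a long chat export can
# exceed the model's context window. A crude guard is to keep only the most
# recent part of the transcript; the 12000-character budget here is a guess,
# not a measured limit.
def truncate_chat(chat_content: str, max_chars: int = 12000) -> str:
    # Keep the tail of the transcript, since recent messages are usually the
    # most relevant, and cut at a line boundary for readability
    if len(chat_content) <= max_chars:
        return chat_content
    tail = chat_content[-max_chars:]
    return tail[tail.find("\n") + 1:]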
# Define the Gradio interface
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.File(label="Upload WhatsApp Chat File"),
        gr.Textbox(label="Ask a Question", placeholder="Enter your question here..."),
    ],
    outputs="text",
    title="WhatsApp Chat Q&A",
    description="Upload a WhatsApp chat file and ask questions related to the chat content.",
)
if __name__ == "__main__":
    interface.launch()
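
# To run locally (a sketch; package names are inferred from the imports above,
# and the token value is a placeholder):
#   pip install gradio python-dotenv langchain-huggingface langchain-community
#   echo 'HF_TOKEN=hf_your_token_here' > .env
#   python app.py   # assuming this file is saved as app.py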