import os

import gradio as gr
from gtts import gTTS
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import AzureChatOpenAI
from openai import AzureOpenAI
from pypdf import PdfReader


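# Configuration comes from environment variables: URL holds the Azure OpenAI
# endpoint, AZURE_OPENAI_API_KEY is picked up implicitly by both Azure clients,
# and AZURE_CHAT_DEPLOYMENT (an assumed name, defaulting to "gpt-4" below)
# selects the chat-model deployment.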

class ScreeningAssistant:
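    """Voice-based resume screening assistant.

    Pipeline: record the candidate's answer, transcribe it with an Azure
    Whisper deployment, generate the next interview question with an Azure
    chat model, and play the question back via gTTS, all inside a Gradio UI.
    """
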
    def __init__(self):
        # Whisper transcription client. AZURE_OPENAI_API_KEY is read from the
        # environment; URL must hold the Azure OpenAI endpoint.
        self.client = AzureOpenAI(api_version="2024-02-01", azure_endpoint=os.getenv("URL"))
        # Chat model used by get_response(). The deployment name is an
        # assumption - set AZURE_CHAT_DEPLOYMENT to match your own deployment.
        self.chat = AzureChatOpenAI(
            api_version="2024-02-01",
            azure_endpoint=os.getenv("URL"),
            azure_deployment=os.getenv("AZURE_CHAT_DEPLOYMENT", "gpt-4"),
        )

    def extract_text(self, pdf_path):
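        """Return the concatenated text of every page in the PDF at pdf_path."""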
        # PdfReader parses the file; extract_text() can return None for
        # image-only pages, so fall back to an empty string.
        reader = PdfReader(pdf_path)
        all_text = ""

        for page in reader.pages:
            all_text += page.extract_text() or ""
        return all_text

    def audio_to_text(self, audio_file):
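        """Transcribe an audio file with the Azure OpenAI Whisper deployment."""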
        # Must match the custom name chosen when the Whisper model was
        # deployed in Azure OpenAI.
        deployment_id = "whisper07"
        with open(audio_file, "rb") as audio:
            result = self.client.audio.transcriptions.create(
                file=audio,
                model=deployment_id,
            )
        return result.text


    def text_to_audio(self, mytext):
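        """Convert text to English speech with gTTS and return the mp3 path."""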
        # Render the text as English speech at normal speed and save it to an
        # mp3 file that Gradio can play back.
        speech = gTTS(text=mytext, lang="en", slow=False)
        audio_path = "welcome.mp3"
        speech.save(audio_path)
        return audio_path

    def get_response(self, audio_path, chat_history, resume, jd):
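        """Run one interview turn: transcribe the candidate's answer, ask the
        chat model for the next question, and synthesize it as audio.

        Returns an empty textbox value, the updated chat history, and the
        path to the reply audio for Gradio to play.
        """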
        
        candidate = self.audio_to_text(audio_path)
        resume_text = self.extract_text(resume.name)
        jd_text = self.extract_text(jd.name)

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    """You are an intelligent interviewer. Ask the candidate questions
                    based on their resume, following up on each of the candidate's
                    answers and using the chat history for context. At the end of the
                    interview, close with a greeting to the candidate.""",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        chain = prompt | self.chat

        answer = chain.invoke(
            {
                "messages": [
                    HumanMessage(content=f"Job description: {jd_text}\nResume: {resume_text}"),
                    AIMessage(
                        content=f"""Acting as an intelligent interviewer, I ask the
                        candidate questions that follow up on their answers.
                        Chat history: {chat_history}"""
                    ),
                    HumanMessage(content=candidate),
                ],
            }
        )

        result = answer.content
        chat_history.append((candidate, result))
        audio_output = self.text_to_audio(result)
        return "", chat_history, audio_output

    def gradio_interface(self) -> None:
        """Create a Gradio interface for the chatbot."""
        with gr.Blocks(css="style.css", theme="shivi/calm_seafoam") as demo:

            gr.HTML("""
            <center class="darkblue" style="text-align:center;padding:30px;">
            <img src="https://amosszeps.com/wp-content/uploads/2021/12/llyods-bank.png" alt="Lloyds Bank">
            <h1 style="color:#006E49; font-weight: bold;">Screening Assistant</h1>
            </center>
            """)

            with gr.Row():
                with gr.Column(scale=4):
                    chatbot = gr.Chatbot()
                with gr.Column(scale=1):
                    with gr.Row():
                        resume = gr.File(label="Resume")
                    with gr.Row():
                        jd = gr.File(label="Job Description")
            with gr.Row():
                with gr.Column(scale=4):
                    msg = gr.Textbox(label="Question", show_label=False)
                with gr.Column(scale=1):
                    clear = gr.ClearButton([chatbot])
            with gr.Row():
                with gr.Column(scale=1):
                    audio_path = gr.Audio(sources=["microphone"], type="filepath")
                with gr.Column(scale=1):
                    play_audio = gr.Audio(value=None, autoplay=True)

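            # When the candidate stops recording, transcribe the clip, ask the
            # model for the next question, and autoplay the synthesized reply.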
            audio_path.stop_recording(self.get_response, [audio_path, chatbot, resume, jd], [msg, chatbot, play_audio])

        demo.launch()   

if __name__ == "__main__":
    assistant = ScreeningAssistant()
    assistant.gradio_interface()