# Import the necessary libraries
import json
import uuid
import os
from pathlib import Path

import gradio as gr
from openai import OpenAI
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings
)
from langchain_community.vectorstores import Chroma
from huggingface_hub import CommitScheduler

os.environ['OPENAI_API_KEY'] = "gl-U2FsdGVkX18e2Pmna5tn6g6u7mqi55sN7xcOMntKGypQnR3Y4CQK5VfbJYc0Nt7c"
os.environ["OPENAI_BASE_URL"] = "https://aibe.mygreatlearning.com/openai/v1"

# Create the client
client = OpenAI()
model_name = 'gpt-4o-mini'

# Load the embedding model and the persisted Chroma vector store
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')

reports_collection = 'reports_collection'

vectorstore_persisted = Chroma(
    collection_name=reports_collection,
    persist_directory='./reports_db',
    embedding_function=embedding_model
)

# Prepare the logging functionality: each run appends JSON lines to a unique file,
# and a CommitScheduler periodically pushes the log folder to a Hugging Face dataset repo
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

scheduler = CommitScheduler(
    repo_id="reports-qna",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)

qna_system_message = """
You are an assistant to a financial analyst. Your task is to summarize and provide relevant information in response to the analyst's question, based on the provided context.

User input will include the necessary context for you to answer their question. This context will begin with the token: ###Context.
The context contains references to specific portions of documents relevant to the user's query, along with the page number from the report.
The page number for each piece of context will begin with the token: ###Page.

When crafting your response:
1. Select only the context relevant to answering the question.
2. Include the page numbers in your response.
3. User questions will begin with the token: ###Question.
4. If the question is irrelevant, or if you do not have the information needed to answer it, respond with "Sorry, this is out of my knowledge base".

Please adhere to the following guidelines:
- Your response should address only the question asked and nothing else.
- Answer only using the context provided.
- Do not mention anything about the context in your final answer.
- If the answer is not found in the context, it is very important that you respond with "Sorry, this is out of my knowledge base".
- Always quote the page number when you use the context. Cite the relevant page number at the end of your response under the section - Page:
- Do not make up sources. Use only the page numbers provided in the context; you are prohibited from providing any other links/sources.

Here is an example of how to structure your response:

Answer:
[Answer]

Page:
[Page number]
"""

qna_user_message_template = """
###Context
Here are some documents, along with their page numbers, that are relevant to the question mentioned below.

{context}

###Question
{question}
"""
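# The retrieval step in predict() below filters chunks on a 'source' metadata key
# and tags each chunk with its 'page' metadata. Both keys depend on how the PDFs
# were ingested into './reports_db'. As a reference, here is a minimal sketch of
# how the store could have been built -- this is an assumption, since the actual
# ingestion script is not part of this file, and the loader, splitter, and chunk
# size are illustrative choices. The function is defined here but never called.
def build_reports_db(pdf_paths):
    from langchain_community.document_loaders import PyPDFLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = []
    for path in pdf_paths:
        # PyPDFLoader records 'source' (the file path) and 'page' in each
        # chunk's metadata, which the retrieval filter and ###Page tag rely on
        chunks.extend(splitter.split_documents(PyPDFLoader(path).load()))
    return Chroma.from_documents(
        chunks,
        embedding_model,
        collection_name=reports_collection,
        persist_directory='./reports_db'
    )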
# Define the predict function that runs when 'Submit' is clicked or when an API request is made
def predict(user_input, company):
    # Restrict retrieval to the selected company's 10-K report
    source_filter = "dataset/" + company + "-10-k-2023.pdf"

    relevant_document_chunks = vectorstore_persisted.similarity_search(
        user_input,
        k=5,
        filter={"source": source_filter}
    )

    # Tag each chunk with its page number; the chunks already end in blank lines,
    # so a plain join keeps them separated
    context_list = [
        d.page_content + "\n ###Page: " + str(d.metadata['page']) + "\n\n "
        for d in relevant_document_chunks
    ]
    context_for_query = "".join(context_list)

    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]

    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content
    except Exception as e:
        # Return the error as text so it stays JSON-serializable for logging
        prediction = f"Sorry, something went wrong: {e}"

    # Once the prediction is made, log both the inputs and outputs to a local log file.
    # While writing to the log file, hold the commit scheduler's lock to avoid
    # parallel access during a scheduled push
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction


textbox = gr.Textbox(placeholder="Enter your query here", lines=6)
company = gr.Radio(
    choices=["google", "msft", "aws", "ibm", "meta"],
    label="Select the company"
)

# Create the interface
demo = gr.Interface(
    fn=predict,
    inputs=[textbox, company],
    outputs="text",
    title="10-K Reports Q&A System",
    description="This web app presents an interface to ask questions on 10-K reports",
    concurrency_limit=16
)

demo.queue()
demo.launch()
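# Illustrative usage (an assumption, not part of the original app): predict() can
# be called directly for a quick smoke test. The question and company below are
# made up for demonstration. Run such a check before demo.launch(), which blocks,
# or from a separate session:
#
#   print(predict("What was the total revenue for fiscal year 2023?", "google"))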