import os

import openai
import streamlit as st
from huggingface_hub import InferenceClient
from tenacity import retry, stop_after_attempt, wait_random_exponential
# Get the Hugging Face API token
hf_token = os.environ["HF_API_KEY"]
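# NB: the token must be present in the environment before the app starts, e.g.
# (illustrative shell command, not part of this file): export HF_API_KEY=hf_...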
# Define a dedicated function for assembling the prompt (as we can't use haystack)
def get_prompt(context, label):
    base_prompt = (
        f"Summarize the following context efficiently in bullet points, the fewer the better - but keep concrete goals. "
        f"Summarize only elements of the context that address the vulnerability of {label} to climate change. "
        f"If there is no mention of {label} in the context, return: 'No clear references to vulnerability of {label} found'. "
        "Do not include an introduction sentence, just the bullet points as per below. "
        "Formatting example: "
        "- Bullet point 1 "
        "- Bullet point 2 "
    )
    prompt = base_prompt + "; Context: " + context + "; Answer:"
    return prompt
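# Illustrative example (not executed): get_prompt("Sea-level rise threatens ports.", "ports")
# returns a string of the form
#   "Summarize the following context efficiently in bullet points, ...
#    vulnerability of ports to climate change. ...; Context: Sea-level rise
#    threatens ports.; Answer:"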
# Exception handling for issuing multiple API calls to OpenAI (exponential backoff),
# retained from an earlier OpenAI-based version of this app:
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
# def completion_with_backoff(**kwargs):
#     return openai.ChatCompletion.create(**kwargs)
class ChatCompletionResult:
    '''Accumulate streamed completion chunks into a single response string.'''

    def __init__(self):
        self.content = ""

    def add_content(self, text):
        self.content += text

    def get_full_content(self):
        return self.content.strip()
def run_query(context, label, model_sel_name):
    '''
    Summarize the provided text with the selected model and stream the
    result to the Streamlit UI.
    '''
    chatbot_role = """You are an analyst specializing in climate change impact assessments and producing insights from policy documents."""
    messages = [
        {"role": "system", "content": chatbot_role},
        {"role": "user", "content": get_prompt(context, label)},
    ]
    # Initialize the client, pointing it to one of the available models
    client = InferenceClient(model_sel_name, token=hf_token)
    # Request the chat completion as a generator object (stream is set to True)
    chat_completion = client.chat.completions.create(
        messages=messages,
        stream=True,
    )
    # Create an object to store the full chat completion
    completion_result = ChatCompletionResult()
    res_box = st.empty()

    # Iterate through the streamed output
    for chunk in chat_completion:
        # Extract the delta object containing the newly generated text
        if chunk.choices is not None:
            chunk_message = chunk.choices[0].delta
            # The delta is an object, not a dict: check its `content` attribute
            if chunk_message.content is not None:
                completion_result.add_content(chunk_message.content)  # Store the message
                # Merge the latest text with everything received so far
                result = completion_result.get_full_content()
                res_box.success(result)  # Update the response text box

    # Return the stored chat completion object for later use
    return completion_result
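

# Minimal usage sketch (the names and model id below are illustrative
# assumptions, not part of this app):
#
# if st.button("Summarize"):
#     run_query(
#         context=retrieved_text,  # hypothetical: text extracted from a policy document
#         label="coastal communities",  # hypothetical vulnerability label
#         model_sel_name="meta-llama/Meta-Llama-3-8B-Instruct",  # hypothetical model id
#     )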