import os
import whisper
from io import BytesIO  # in-memory, file-like byte buffer
import base64
import boto3  # AWS SDK, used here for Amazon Polly text-to-speech
from pydub import AudioSegment  # load and manipulate audio files
from pydub.playback import play  # play audio through the default output device
import logging
import numpy as np
import openai
from langchain import OpenAI
from langchain.chains import RetrievalQA  # retrieval-based question-answering chain
from langchain.vectorstores import Chroma  # vector store for document embeddings
from langchain.document_loaders import DirectoryLoader  # load all documents in a directory
from langchain.embeddings.openai import OpenAIEmbeddings  # OpenAI embedding model wrapper
from langchain.text_splitter import CharacterTextSplitter  # split text into fixed-size chunks
#import streamlit as st
from tenacity import (
    retry,
    stop_after_attempt,
    wait_random_exponential,
)

# FUNCTIONS
# get embeddings
#@st.cache_data
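# The tenacity helpers imported above are otherwise unused; a plausible reading (an
# assumption, not stated in the original) is that they were meant to retry transient
# OpenAI API errors with exponential backoff:
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))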
def embedding_from_string(input: str, model: str) -> list:
    """Return the embedding vector for a single string (pre-1.0 openai client API)."""
    response = openai.Embedding.create(input=input, model=model)
    embedding = response["data"][0]["embedding"]
    return embedding
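
# Usage sketch (assumes OPENAI_API_KEY is set in the environment):
#   vec = embedding_from_string("hello world", "text-embedding-ada-002")
#   len(vec)  # 1536 dimensions for text-embedding-ada-002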

OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_REGION_NAME = 'ap-south-1'

# Append timestamped conversation entries to a local log file.
logging.basicConfig(level="INFO",
                    filename='conversations.log',
                    filemode='a',
                    format='%(asctime)s %(message)s',
                    datefmt='%H:%M:%S')

def buzz_user():
    # Play a short prompt sound to get the user's attention after a timeout.
    input_prompt = AudioSegment.from_mp3('assets/timeout_audio.mp3')
    play(input_prompt)

def initialize_knowledge_base():
    # Load every .txt file under the 'profiles' directory.
    loader = DirectoryLoader('profiles', glob='**/*.txt')
    docs = loader.load()

    # Split the documents into chunks of at most 1,000 characters with no overlap.
    char_text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    doc_texts = char_text_splitter.split_documents(docs)

    # Embed each chunk with OpenAI embeddings and store the vectors in a Chroma vector store.
    openAI_embeddings = OpenAIEmbeddings()
    vStore = Chroma.from_documents(doc_texts, openAI_embeddings)

    # Retrieval-QA chain: the single closest chunk (k=1) is "stuffed" into the model prompt.
    conv_model = RetrievalQA.from_chain_type(
        llm=OpenAI(model_name="gpt-3.5-turbo-16k"),
        chain_type="stuff",
        retriever=vStore.as_retriever(
            search_kwargs={"k": 1}
        )
    )

    # Load Whisper's "tiny" checkpoint: the smallest, fastest model, trading some accuracy for speed.
    voice_model = whisper.load_model("tiny")

    return conv_model, voice_model
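
# Usage sketch (assumed calling flow, not part of the original snippet):
#   conv_model, voice_model = initialize_knowledge_base()
#   transcript = voice_model.transcribe("question.wav")["text"]  # Whisper speech-to-text
#   answer = conv_model.run(transcript)                          # retrieval-augmented answer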

def text_to_speech_gen(answer):
    # Convert the answer text to speech with Amazon Polly and return it as an HTML audio tag.
    polly = boto3.client('polly',
                         aws_access_key_id=AWS_ACCESS_KEY_ID,
                         aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                         region_name=AWS_REGION_NAME)
    response = polly.synthesize_speech(
        Text=answer,
        #VoiceId='Matthew',
        VoiceId='Zhiyu',  # Mandarin Chinese voice
        OutputFormat='mp3',
        #Engine="neural"
        Engine="standard")
    audio_stream = response['AudioStream'].read()
    audio_html = audio_to_html(audio_stream)
    return audio_html
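
# Usage sketch (assumes valid AWS credentials and the 'Zhiyu' voice in the chosen region):
#   audio_tag = text_to_speech_gen("Hello, how can I help you?")
#   # audio_tag is an <audio> element with the MP3 embedded as a base64 data URI.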

def audio_to_html(audio_bytes):
    # Wrap raw MP3 bytes in an autoplaying HTML <audio> element using a base64 data URI.
    audio_io = BytesIO(audio_bytes)
    audio_io.seek(0)
    audio_base64 = base64.b64encode(audio_io.read()).decode("utf-8")
    audio_html = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls autoplay></audio>'
    return audio_html
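
# Example: audio_to_html(open('assets/timeout_audio.mp3', 'rb').read()) yields an
# autoplaying <audio> tag that a browser can render without a separate file download.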

def get_chat_history(user_message, history):
    # Clear the input box and append the new user turn; the bot reply is filled in later.
    return "", history + [[user_message, None]]