# english-learning-copilot / app_utils.py
# (Hugging Face Space snapshot — commit 19aff59, "Update app_utils.py" by DiamondYin)
import os
import whisper
from io import BytesIO # BytesIO is a class in the io module that implements an in-memory file-like object.
import base64
import boto3 # AWS Polly
from pydub import AudioSegment # AudioSegment is a class in the pydub module that can be used to manipulate audio files.
from pydub.playback import play # play is a function in the pydub.playback module that can be used to play audio files.
import logging
from langchain import OpenAI
from langchain.chains import RetrievalQA # RetrievalQA is a class in the langchain.chains module that can be used to build a retrieval-based question answering system.
from langchain.vectorstores import Chroma # Chroma is a class in the langchain.vectorstores module that can be used to store vectors.
from langchain.document_loaders import DirectoryLoader #
from langchain.embeddings.openai import OpenAIEmbeddings # OpenAIGPTEmbeddings
from langchain.text_splitter import CharacterTextSplitter # CharacterTextSplitter is a class in the langchain.text_splitter module that can be used to split text into chunks.
# Credentials are read from the environment; OPENAI_API_KEY is picked up
# implicitly by langchain's OpenAI client, the AWS pair is passed to Polly below.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_REGION_NAME = 'ap-south-1'  # Polly region is hard-coded, not env-driven

# Append-mode file log of conversations; each line is time-stamped (HH:MM:SS).
# basicConfig accepts the level name as a string ("INFO").
logging.basicConfig(level="INFO",
                    filename='conversations.log',
                    filemode='a',
                    format='%(asctime)s %(message)s',
                    datefmt='%H:%M:%S')
def buzz_user():
    """Play the timeout prompt clip to get the user's attention."""
    timeout_clip = AudioSegment.from_mp3('assets/timeout_audio.mp3')
    play(timeout_clip)
def initialize_knowledge_base():
    """Build the retrieval-QA conversation model and the speech-recognition model.

    Loads every ``.txt`` profile under ``profiles/``, splits the documents into
    1000-character chunks with no overlap, embeds them with OpenAI embeddings
    into a Chroma vector store, and wires a "stuff" RetrievalQA chain that
    retrieves the single best-matching chunk (k=1). Also loads Whisper's
    "tiny" checkpoint for voice transcription.

    Returns:
        tuple: ``(conv_model, voice_model)`` — the RetrievalQA chain and the
        Whisper model.
    """
    # Every .txt file anywhere under the profiles/ directory tree.
    profile_docs = DirectoryLoader('profiles', glob='**/*.txt').load()

    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    chunks = splitter.split_documents(profile_docs)

    # Alternative embedding backend kept for reference:
    # from langchain.embeddings import HuggingFaceEmbeddings
    # embedding = HuggingFaceEmbeddings(model_name="hkunlp/instructor-large")
    store = Chroma.from_documents(chunks, OpenAIEmbeddings())

    conv_model = RetrievalQA.from_chain_type(
        llm=OpenAI(),
        chain_type="stuff",
        retriever=store.as_retriever(search_kwargs={"k": 1}),
    )

    # Whisper "tiny": the smallest checkpoint — quick to download and load.
    voice_model = whisper.load_model("tiny")
    return conv_model, voice_model
def text_to_speech_gen(answer):
    """Synthesize ``answer`` with AWS Polly and return an HTML audio element.

    Args:
        answer: The text to speak aloud.

    Returns:
        str: An ``<audio>`` tag (base64 data URI) produced by audio_to_html.
    """
    polly_client = boto3.client(
        'polly',
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        region_name=AWS_REGION_NAME,
    )
    # Neural engine with the "Matthew" voice, streamed back as MP3.
    result = polly_client.synthesize_speech(
        Text=answer,
        VoiceId='Matthew',
        OutputFormat='mp3',
        Engine="neural",
    )
    mp3_bytes = result['AudioStream'].read()
    return audio_to_html(mp3_bytes)
def audio_to_html(audio_bytes):
    """Wrap raw MP3 bytes in an autoplaying HTML ``<audio>`` element.

    Args:
        audio_bytes: Raw MP3 audio data (bytes).

    Returns:
        str: An ``<audio>`` tag whose source is a base64 data URI, with
        browser controls enabled and autoplay on.
    """
    # b64encode accepts bytes directly; the original BytesIO round-trip only
    # copied the bytes into an in-memory buffer and read them straight back.
    audio_base64 = base64.b64encode(audio_bytes).decode("utf-8")
    return f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls autoplay></audio>'
def get_chat_history(user_message, history):
    """Append the new user turn (reply pending) and clear the input box.

    Args:
        user_message: The text the user just submitted.
        history: Existing chat history as [user, assistant] pairs.

    Returns:
        tuple: ``("", updated_history)`` — the empty string resets the UI
        textbox; the new history gains a ``[user_message, None]`` pair whose
        answer slot is filled in later.
    """
    updated_history = [*history, [user_message, None]]
    return "", updated_history