import re

import gradio as gr
from deep_translator import GoogleTranslator, single_detection
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from pyaspeller import YandexSpeller
from transformers import BitsAndBytesConfig


def error_correct_pyspeller(sample_text):
    """Spelling/grammar correction of the input text via Yandex Speller."""
    speller = YandexSpeller()
    fixed = speller.spelled(sample_text)
    return fixed


def postprocessing(inp_text: str):
    """Post-processing of the LLM response: strip tags, truncate, spell-correct."""
    inp_text = re.sub('<[^>]+>', '', inp_text)   # drop any HTML/XML-like tags
    inp_text = inp_text.split('##', 1)[0]        # keep only the text before the first '##'
    inp_text = error_correct_pyspeller(inp_text)
    return inp_text
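
# Illustrative (assumed) example of what postprocessing does, for reference only:
#   postprocessing("Paris is the capital of France.<eos>## extra notes")
# would strip the "<eos>" tag, drop everything after "##", and run the rest
# through the spell checker, yielding roughly "Paris is the capital of France.".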

# 4-bit NF4 quantization with double quantization keeps the model's memory
# footprint low while computing in float16.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)

llm = HuggingFacePipeline.from_model_id(
    model_id="Danielrahmai1991/nvlm_adapt_basic_model_16bit",
    task="text-generation",
    pipeline_kwargs=dict(
        max_new_tokens=512,
        do_sample=True,
        repetition_penalty=1.15,
        trust_remote_code=True,
        temperature=0.75,
    ),
    model_kwargs={"quantization_config": quantization_config},
)

chat_model = ChatHuggingFace(llm=llm)


# The conversation history is kept in a Gradio State list that is passed to the handlers below.
def clear_memory(messages):
    messages.clear()
    return "Memory cleaned."


def llm_run(prompt, messages):
    print(f"question is {prompt}")
    # Detect the prompt language; the API key is hard-coded here, but loading it
    # from an environment variable would be safer.
    lang = single_detection(prompt, api_key='4ab77f25578d450f0902fb42c66d5e11')
    if lang == 'en':
        prompt = error_correct_pyspeller(prompt)
    # Translate the prompt to English, query the model, then translate the answer back.
    en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
    messages.append({"role": "user", "content": en_translated})
    ai_msg = chat_model.invoke(messages, skip_prompt=True)
    response_of_llm = postprocessing(ai_msg.content)
    messages.append({"role": "assistant", "content": response_of_llm})
    response_of_llm = GoogleTranslator(source='auto', target=lang).translate(response_of_llm)
    print(f"out is: {response_of_llm}")
    return response_of_llm
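
# Illustrative usage outside the UI (hypothetical prompt; single_detection and
# GoogleTranslator both need network access, so this is a sketch rather than a test):
#   history = []
#   reply = llm_run("Quelle est la capitale de la France ?", history)
#   print(reply)  # the answer is translated back into the detected language (French)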
print("donnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn") | |

with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.pink)) as demo:
    stored_message = gr.State([])
    with gr.Row():
        with gr.Column(scale=2):
            text1 = gr.Textbox(lines=7, label="Prompt", scale=2)
            with gr.Row():
                btn1 = gr.Button("Submit", scale=1)
                btn2 = gr.Button("Clear", scale=1)
                btn3 = gr.Button("Clean Memory", scale=2)
        with gr.Column(scale=2):
            out_text = gr.Text(lines=15, label="Output", scale=2)
    btn1.click(fn=llm_run, inputs=[text1, stored_message], outputs=out_text)
    btn2.click(lambda: [None, None], outputs=[text1, out_text])
    btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])

demo.launch(debug=True, share=True)