import torch
import gradio as gr

from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer ,pipeline, BitsAndBytesConfig
# Load the PEFT adapter config for the fine-tuned entity-coder checkpoint.
# NOTE(review): `config` is never read below — presumably left over from an
# earlier loading pattern; confirm before removing.
config = PeftConfig.from_pretrained("ShishuTripathi/entity_coder")
# Base model: a bf16-sharded Falcon-7B. trust_remote_code is required because
# Falcon ships custom modeling code on the Hub.
model = AutoModelForCausalLM.from_pretrained("ybelkada/falcon-7b-sharded-bf16",trust_remote_code=True)
# Wrap the base model with the LoRA/PEFT adapter weights from the fine-tune.
model = PeftModel.from_pretrained(model,"ShishuTripathi/entity_coder")
# Tokenizer saved alongside the adapter (falls back to the base vocab).
tokenizer = AutoTokenizer.from_pretrained("ShishuTripathi/entity_coder")
# Single shared generation pipeline; max_length=50 caps prompt + completion
# tokens, which is intentionally short since only one coded term is expected.
generator = pipeline('text-generation' , model = model, tokenizer =tokenizer, max_length = 50)

def text_generation(input_text):
    """Extract a MedDRA-style coded term from a free-text adverse-event narrative.

    Args:
        input_text: The narrative or phrase entered by the user.

    Returns:
        The pipeline's generated text (prompt included) with the EOS marker
        removed and surrounding whitespace stripped.
    """
    # Prompt format must match the fine-tuning template exactly
    # (including the spacing around the newline).
    prompt = f"### Narrative: {input_text} \n ### Reported Term:"
    out = generator(prompt)
    # BUG FIX: Falcon's end-of-sequence token is '<|endoftext|>'. The previous
    # code replaced only '|endoftext|', leaving stray '<' and '>' characters
    # in the returned text. Replace the full token instead.
    output = out[0]['generated_text'].replace('<|endoftext|>', ' ').strip()
    return output

title = "Preferred Term Extractor and Coder"
description = "The term used to describe an adverse event in the Database of Adverse Event Notifications - medicines is the MedDRA 'preferred term', which describes a single medical concept"

# Build and launch the Gradio UI. The deprecated gr.inputs/gr.outputs
# namespaces (removed in Gradio 3.x/4.x) are replaced with the top-level
# gr.Textbox components; behavior of the app is otherwise unchanged.
gr.Interface(
    text_generation,
    [gr.Textbox(lines=2, label="Enter Narrative or Phrase")],
    # FIX: corrected user-facing label typo "Preffered" -> "Preferred".
    [gr.Textbox(label="Extracted Preferred Term")],
    title=title,
    description=description,
    theme="huggingface"
).launch()