from transformers import (AutoModelForCausalLM, AutoTokenizer, GenerationConfig)
from peft import PeftModel
import gradio as gr

# Load the base model in 8-bit and attach the fine-tuned adapter
base_model = AutoModelForCausalLM.from_pretrained(
    "BioMistral/BioMistral-7B",
    load_in_8bit=True,
    device_map="auto"
)
model = PeftModel.from_pretrained(base_model, "sayyid14/BioMistralCancer5epoch")

# Deterministic decoding: beam search with a mild repetition penalty
generation_config = GenerationConfig(
    do_sample=False,
    num_beams=4,
    repetition_penalty=1.15,
)

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained("BioMistral/BioMistral-7B", trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

# Response function
def chatbot_response(user_input):
    PROMPT = f"""Below is a question about cancer. Please answer this question correctly. If you don't know the answer, just say that you don't know and don't share false information.

### Question:
{user_input}

### Answer:"""

    inputs = tokenizer(PROMPT, return_tensors="pt")
    input_ids = inputs["input_ids"].to(model.device)  # move inputs onto the model's device

    print("Generating...")
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id
    )

    # Decode the generated sequence and keep only the text after the answer marker
    decoded = tokenizer.decode(generation_output.sequences[0], skip_special_tokens=True)
    result = decoded.split("### Answer:")[1]
    return result.strip()

# Build the interface
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(placeholder="Type your question...", label="User"),
    outputs=gr.Textbox(label="Bot"),
    title="BioMistralCancer",
    description="Ask anything about Cancer",
    css="""
    body {
        background-size: cover;
        background-repeat: no-repeat;
        background-position: center;
        height: 100vh;
        margin: 0;
    }
    .gradio-container {
        background-color: rgba(255, 255, 255, 0.8);
        border-radius: 10px;
        padding: 20px;
        backdrop-filter: blur(10px);
    }
    """
)

# Run the app
iface.launch(share=True)
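
# Optional usage sketch (not part of the original script; the question below is
# purely illustrative). Calling chatbot_response() directly is a quick way to
# verify that the base model and adapter load and generate correctly before
# exposing the Gradio UI. If you try it, place the call *before* iface.launch(),
# since launch() blocks when the file is run as a plain Python script.
#
# print(chatbot_response("What are the most common symptoms of lung cancer?"))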