Runtime error
Create app.py
app.py
ADDED
@@ -0,0 +1,48 @@
+import gradio as gr
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+# Replace with your model name
+#MODEL_NAME = "ssirikon/Gemma7b-bnb-Unsloth"
+MODEL_NAME = "unsloth/gemma-7b-bnb-4bit"
+
+# Load the model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_NAME,
+    device_map="auto",
+    torch_dtype=torch.float16,
+    load_in_4bit=True,  # Load the model in 4-bit precision
+    # Removed the unsupported argument
+)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+
+# **Change 1: Set `llm_int8_skip_modules` to avoid deep copy**
+#model.quantization_config.llm_int8_skip_modules = ['lm_head']
+
+# Create a pipeline for text generation
+generator = pipeline(
+    task="text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=50,  # Adjust as needed
+    do_sample=True,
+    top_k=10,
+    num_return_sequences=1,
+    eos_token_id=tokenizer.eos_token_id,
+)
+
+def generate_text(email):
+    result = generator("Generate a subject line for the following email.\n" + email)
+    return result[0]["generated_text"]
+
+
+# Create a Gradio interface
+demo = gr.Interface(
+    fn=generate_text,
+    inputs=gr.Textbox(lines=5, label="Enter your Email here:"),
+    outputs=gr.Textbox(label="Generated Subject"),
+    title="Email Subject Generation demo",
+    description="Enter an email and let the model generate the subject for you!",
+)
+
+demo.launch()
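
The Space's runtime-error status may be related to the quantization arguments: recent transformers releases deprecate passing load_in_4bit directly to from_pretrained in favor of a BitsAndBytesConfig, and the unsloth/gemma-7b-bnb-4bit checkpoint already ships pre-quantized weights, so it can often be loaded with no quantization arguments at all. A minimal sketch of the loading step with the explicit config, assuming the bitsandbytes package is installed in the Space:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Explicit 4-bit settings via BitsAndBytesConfig rather than the deprecated
# load_in_4bit keyword; assumes bitsandbytes is available on the Space.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    quantization_config=bnb_config,
)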
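
Note also that the text-generation pipeline returns the prompt plus the completion in generated_text, so the output box will echo the instruction and the pasted email before the subject line. If only the new tokens are wanted, the pipeline accepts return_full_text=False at call time; a small sketch of that variant of generate_text:

def generate_text(email):
    prompt = "Generate a subject line for the following email.\n" + email
    # return_full_text=False keeps only the newly generated tokens,
    # so the prompt is not echoed back into the Gradio output box.
    result = generator(prompt, return_full_text=False)
    return result[0]["generated_text"].strip()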