Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 # Replace with your model name
 #MODEL_NAME = "ssirikon/Gemma7b-bnb-Unsloth"
 #MODEL_NAME = "unsloth/gemma-7b-bnb-4bit"
-MODEL_NAME = "
+MODEL_NAME = "Lohith9459/gemma7b"
 
 # Load the model and tokenizer
 model = AutoModelForCausalLM.from_pretrained(
@@ -17,33 +17,40 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 
-# **Change 1: Set `llm_int8_skip_modules` to avoid deep copy**
-#model.quantization_config.llm_int8_skip_modules = ['lm_head']
-
-# Create a pipeline for text generation
-generator = pipeline(
-    task="summarization",
-    model=model,
-    tokenizer=tokenizer,
-    max_new_tokens=50, # Adjust as needed
-    do_sample=True,
-    top_k=10,
-    num_return_sequences=1,
-    eos_token_id=tokenizer.eos_token_id,
-)
-
-def generate_text(email):
-    result = generator("Generate a subject line for the following email.\n"+email)
-    return result[0]["generated_text"]
-
 
-
+def generate_subject(email_body):
+    instruction = "Generate a subject line for the following email."
+    formatted_text = f"""Below is an instruction that describes a task. \
+Write a response that appropriately completes the request.
+
+### Instruction:
+{instruction}
+
+### Input:
+{email_body}
+
+### Response:
+"""
+    inputs = tokenizer([formatted_text], return_tensors="pt").to("cuda")
+    text_streamer = TextStreamer(tokenizer)
+    generated_ids = model.generate(**inputs, streamer=text_streamer, max_new_tokens=512)
+    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+
+    def extract_subject(text):
+        start_tag = "### Response:"
+        start_idx = text.find(start_tag)
+        if start_idx == -1:
+            return None
+        subject = text[start_idx + len(start_tag):].strip()
+        return subject
+
+    return extract_subject(generated_text)
+
+# Create the Gradio interface
 demo = gr.Interface(
-    fn=
-    inputs=gr.Textbox(lines=
-    outputs=gr.Textbox(label="Generated Subject")
-    title="Email Subject Generation demo",
-    description="Enter an email and let the model generate the subject for you!",
+    fn=generate_subject,
+    inputs=gr.Textbox(lines=20, label="Email Body"),
+    outputs=gr.Textbox(label="Generated Subject")
 )
 
-demo.launch(
+demo.launch()
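For reference, below is a minimal self-contained sketch of what the updated app.py appears to implement. Everything outside the diffed lines is an assumption: the imports, the from_pretrained arguments hidden on lines 11-17 (replaced here with plain defaults), the ALPACA_TEMPLATE constant, and the device fallback. Two details visible in the diff are worth flagging, since either could explain the Space's Runtime error status: TextStreamer does not appear in the import line shown in the hunk header, and the hard-coded .to("cuda") raises on CPU-only hardware.

```python
# Hypothetical reconstruction of the updated app.py; only the diffed lines
# above are confirmed, the rest is filled in under stated assumptions.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

MODEL_NAME = "Lohith9459/gemma7b"

# Fall back to CPU instead of hard-coding "cuda"; the hard-coded call in
# the diff raises a RuntimeError when the Space runs on CPU hardware.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumption: the from_pretrained arguments on hidden lines 11-17 are not
# visible in the diff, so the model is loaded with plain defaults here.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Alpaca-style prompt template matching the f-string in the diff.
ALPACA_TEMPLATE = """Below is an instruction that describes a task. \
Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{email_body}

### Response:
"""

def generate_subject(email_body):
    prompt = ALPACA_TEMPLATE.format(
        instruction="Generate a subject line for the following email.",
        email_body=email_body,
    )
    inputs = tokenizer([prompt], return_tensors="pt").to(device)
    # TextStreamer echoes tokens to stdout as they are generated; it does
    # not change the ids that generate() returns.
    streamer = TextStreamer(tokenizer)
    generated_ids = model.generate(**inputs, streamer=streamer, max_new_tokens=512)
    text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    # The decoding includes the prompt, so keep only what follows the
    # "### Response:" tag; fall back to the full text if the tag is missing.
    _, tag, subject = text.partition("### Response:")
    return subject.strip() if tag else text

demo = gr.Interface(
    fn=generate_subject,
    inputs=gr.Textbox(lines=20, label="Email Body"),
    outputs=gr.Textbox(label="Generated Subject"),
)

demo.launch()
```

Using str.partition instead of the diff's nested extract_subject helper keeps the failure mode explicit: if the tag were ever missing, the full decoded text is returned rather than None, which Gradio would otherwise render as an empty output box.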