import gradio as gr
from transformers import pipeline

# Earlier iterations loaded nitinbhayana/Llama-2-7b-chat-hf-review-phrases-sentiments-v2;
# the current Space uses the smaller TinyLlama title-suggestion fine-tune below.
# A distinct variable name avoids shadowing the imported `pipeline` factory.
generator = pipeline(
    "text-generation",
    model="nitinbhayana/TinyLlama-1.1B-Chat-v1.0-title-suggestion-v1.0",
)

def predict(title):
    # Llama-2 chat-style instruction prompt asking the model to rewrite the product title.
    prompt = f"""<s>[INST] <<SYS>> You are a helpful assistant that provides accurate and concise responses. <</SYS>>
Create a new, easy to read, and error free title for a given Ecommerce product title.
[Title] {title} [/Title]
[/INST]
### Suggested Title:"""
    predictions = generator(prompt)
    # Keep only the text after the "Suggested Title:" marker and trim whitespace.
    return predictions[0]['generated_text'].split('Suggested Title:')[-1].strip()

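# Minimal Gradio UI: one text box for the original title, one for the suggested title.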
gr.Interface(
    predict,
    inputs='text',
    outputs='text',
    title="Title Suggestion",
).launch()
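
# For a quick sanity check without the web UI, predict() can be called directly, e.g.
#   print(predict("wireless over ear headphones noise cancelling 40h battery"))
# (the sample title above is only an illustrative placeholder).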