Update app.py
app.py CHANGED
@@ -1,12 +1,11 @@
-
-#import os
-
-
-
 import torch
 import re
 import gradio as gr
 from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+import cohere
+
+
+
 
 device='cpu'
 encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
@@ -17,28 +16,50 @@ tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
 model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
 
 
-def predict(image,max_length=64, num_beams=4):
+def predict(department,image,max_length=64, num_beams=4):
     image = image.convert('RGB')
     image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
     clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0]
     caption_ids = model.generate(image, max_length = max_length)[0]
     caption_text = clean_text(tokenizer.decode(caption_ids))
-
+    dept=department
+    context= caption_text
+    response = co.generate(
+        model='large',
+        prompt=f'create non offensive one line meme for given department and context\n\ndepartment- data science\ncontext-a man sitting on a bench with a laptop\nmeme- \"I\'m not a data scientist, but I play one on my laptop.\"\n\ndepartment-startup\ncontext-a young boy is smiling while using a laptop\nmeme-\"When your startup gets funded and you can finally afford a new laptop\"\n\ndepartment- {dept}\ncontext-{context}\nmeme-',
+        max_tokens=20,
+        temperature=0.8,
+        k=0,
+        p=0.75,
+        frequency_penalty=0,
+        presence_penalty=0,
+        stop_sequences=["department"],
+        return_likelihoods='NONE')
+    reponse=response.generations[0].text
+    reponse = reponse.replace("department", "")
+    Feedback_SQL="DEPT"+dept+"CAPT"+caption_text+"MAMAY"+reponse
+
+
+    return reponse
 
 
 
-input = gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True)
-
+# input = gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True)
+
+
+output = gr.outputs.Textbox(type="auto",label="Meme")
 #examples = [f"example{i}.jpg" for i in range(1,7)]
 #examples = os.listdir()
-description= "
-title = "
+description= "meme generation using advanced NLP "
+title = "Meme world 🖼️"
+dropdown=["data science ", "product management","marketing","startup" ,"agile","crypto" , "SEO" ]
 
-article = "Created By :
+article = "Created By : Xaheen "
 
 interface = gr.Interface(
     fn=predict,
-    inputs =
+    inputs = [gr.inputs.Dropdown(dropdown),gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True)],
+
     theme="grass",
     outputs=output,
    # examples = examples,
@@ -46,4 +67,7 @@ interface = gr.Interface(
     description=description,
     article = article,
 )
-interface.launch(debug=True)
+interface.launch(debug=True)
+
+
+# c0here2022
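Note on the added Cohere call: predict() now calls co.generate(...), but none of the hunks shown here define co, so the client is presumably created elsewhere in the file (possibly on the blank lines added right after import cohere). A minimal sketch of that assumed setup in Python; the COHERE_API_KEY environment variable name is hypothetical and not taken from this diff:

import os
import cohere

# Assumed setup, not shown in the diff: predict() calls co.generate(...),
# so a module-level Cohere client named co must exist before the Gradio
# app runs. The COHERE_API_KEY variable name here is hypothetical.
co = cohere.Client(os.environ.get("COHERE_API_KEY", ""))

With a client in place, the few-shot prompt built in predict asks the "large" generation model for a one-line meme for the selected department and the generated image caption, and stop_sequences=["department"] cuts the completion off before the model starts writing the next few-shot example on its own.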