ProPerNounpYK committed
Commit: a415ec3
Parent: 98f2c2f

Update app.py

Files changed (1): app.py (+25, -35)
app.py CHANGED
@@ -1,48 +1,38 @@
+import gradio as gr
 import torch
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForImageGeneration
+from transformers import AutoModelForSequenceToSequence, AutoTokenizer
 from PIL import Image
-import gradio as gr
+from torchvision import transforms
 
-# Load the language model
-language_model = AutoModelForSequenceClassification.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
-language_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
-# Load the image generation model
-image_model = AutoModelForImageGeneration.from_pretrained("artificialguybr/CuteCartoonRedmond-V2")
+# Load the Text-to-Image model
+image_model = AutoModelForSequenceToSequence.from_pretrained("artificialguybr/CuteCartoonRedmond-V2")
 image_tokenizer = AutoTokenizer.from_pretrained("artificialguybr/CuteCartoonRedmond-V2")
 
-# Define a function to generate an image based on a prompt
-def generate_image(prompt):
-    input_ids = image_tokenizer.encode(prompt, return_tensors="pt")
-    output = image_model.generate(input_ids)
+# Load the Text Generation model
+text_model = AutoModelForSequenceToSequence.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
+text_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+# Define a function to generate an image from text
+def generate_image(text):
+    inputs = image_tokenizer(text, return_tensors="pt")
+    output = image_model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"])
     image = Image.fromarray(output[0].detach().numpy())
     return image
 
-# Define a function to have a conversation
-def have_conversation(input_text):
-    input_ids = language_tokenizer.encode(input_text, return_tensors="pt")
-    output = language_model.generate(input_ids)
-    response = language_tokenizer.decode(output[0], skip_special_tokens=True)
-    return response
+# Define a function to generate text from text
+def generate_text(text):
+    inputs = text_tokenizer(text, return_tensors="pt")
+    output = text_model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"])
+    return text_tokenizer.decode(output[0], skip_special_tokens=True)
 
 # Create a Gradio interface
-iface = gr.Interface(
-    fn=have_conversation,
-    inputs="text",
-    outputs="text",
-    title="Converse with AI",
-    description="Talk to the AI and see its response!"
-)
-
-# Add an image generation feature to the interface
-iface.add_component(
-    gr.Image(type="pil"),
+demo = gr.Interface(
+    fn=lambda text: {"image": generate_image(text), "text": generate_text(text)},
     inputs="text",
-    outputs="image",
-    fn=generate_image,
-    title="Generate Image",
-    description="Enter a prompt and see the generated image!"
+    outputs=["image", "text"],
+    title="Text-to-Image and Text Generation",
+    description="Enter a prompt to generate both an image and text!"
 )
 
-# Launch the interface
-iface.launch()
+# Launch the Gradio app
+demo.launch()
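
Note (not part of the commit): neither version of app.py runs as written. transformers has no AutoModelForImageGeneration or AutoModelForSequenceToSequence class, Mixtral-8x7B-Instruct-v0.1 is a causal language model normally loaded with AutoModelForCausalLM, and artificialguybr/CuteCartoonRedmond-V2 appears to be a Stable Diffusion XL LoRA with no tokenizer, so it would be loaded through diffusers rather than an AutoModel. A minimal sketch of a layout that could work, assuming a CUDA GPU with enough memory for Mixtral, the accelerate package for device_map="auto", and that the LoRA repository is compatible with load_lora_weights:

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from diffusers import StableDiffusionXLPipeline

# Mixtral is a decoder-only (causal) language model, so AutoModelForCausalLM is the loader to use.
text_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
text_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    torch_dtype=torch.float16,
    device_map="auto",  # assumes accelerate is installed
)

# CuteCartoonRedmond-V2 is treated here as an SDXL LoRA (assumption), loaded on top of a base
# SDXL checkpoint with diffusers instead of a tokenizer/AutoModel pair.
image_pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
).to("cuda")
image_pipe.load_lora_weights("artificialguybr/CuteCartoonRedmond-V2")

def generate_text(prompt: str) -> str:
    # Tokenize the prompt and let the language model generate a continuation.
    inputs = text_tokenizer(prompt, return_tensors="pt").to(text_model.device)
    output = text_model.generate(**inputs, max_new_tokens=256)
    return text_tokenizer.decode(output[0], skip_special_tokens=True)

def generate_image(prompt: str):
    # The diffusers pipeline already returns PIL images; no Image.fromarray conversion needed.
    return image_pipe(prompt).images[0]

def generate_both(prompt: str):
    # gr.Interface maps a returned tuple onto the output components in order.
    return generate_image(prompt), generate_text(prompt)

demo = gr.Interface(
    fn=generate_both,
    inputs="text",
    outputs=[gr.Image(type="pil"), gr.Textbox()],
    title="Text-to-Image and Text Generation",
    description="Enter a prompt to generate both an image and text!",
)

demo.launch()

Returning a tuple keeps the mapping from function outputs to components explicit; gr.Interface expects a tuple/list for multiple outputs (or a dict keyed by component objects), so the string-keyed dict returned by the lambda in the committed version would likely not map onto the image and text outputs correctly.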