vilarin committed
Commit 2dc3f5b
1 Parent(s): f8a3d66

Update app.py

Files changed (1): app.py (+7 -9)
app.py CHANGED
@@ -4,12 +4,11 @@ import gradio as gr
 import numpy as np
 import random
 #from diffusers import FluxPipeline
-from huggingface_hub import AsyncInferenceClient
+from huggingface_hub import InferenceClient
 from translatepy import Translator
 #from huggingface_hub import hf_hub_download
 import requests
 import re
-import asyncio
 from PIL import Image
 
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
@@ -32,8 +31,7 @@ JS = """function () {
 }
 }"""
 
-client1 = AsyncInferenceClient()
-client2 = AsyncInferenceClient()
+client = InferenceClient()
 
 def enable_lora(lora_in, lora_add):
     if not lora_in and not lora_add:
@@ -43,7 +41,7 @@ def enable_lora(lora_in, lora_add):
         lora_in = lora_add
     return lora_in
 
-async def generate_image(
+def generate_image(
     prompt:str,
     model:str,
     width:int=768,
@@ -61,7 +59,7 @@ async def generate_image(
 
     #generator = torch.Generator().manual_seed(seed)
 
-    image1 = await client1.text_to_image(
+    image1 = client.text_to_image(
         prompt=text,
         height=height,
         width=width,
@@ -69,7 +67,7 @@ async def generate_image(
         num_inference_steps=steps,
         model=basemodel,
     )
-    image2 = await client2.text_to_image(
+    image2 = client.text_to_image(
         prompt=text,
         height=height,
         width=width,
@@ -79,7 +77,7 @@ async def generate_image(
     )
     return image1, image2, seed
 
-async def gen(
+def gen(
     prompt:str,
     lora_in:str="",
     lora_add:str="",
@@ -92,7 +90,7 @@ async def gen(
 ):
     model = enable_lora(lora_in, lora_add)
     print(model)
-    image1, image2, seed = await generate_image(prompt,model,width,height,scales,steps,seed)
+    image1, image2, seed = generate_image(prompt,model,width,height,scales,steps,seed)
     return image1, image2, seed
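This commit replaces the two AsyncInferenceClient instances (and the asyncio import) with a single synchronous InferenceClient shared by both text_to_image calls, so generate_image and gen become plain functions. A minimal standalone sketch of that synchronous call pattern follows; the prompt, model id, and output filename are illustrative placeholders rather than values from this Space, and InferenceClient will use an ambient Hugging Face token if one is configured.

# Minimal sketch of the synchronous InferenceClient usage after this commit.
# Prompt, model id, and filename are placeholders, not taken from the Space's app.py.
from huggingface_hub import InferenceClient

client = InferenceClient()  # picks up a configured Hugging Face token if available

image = client.text_to_image(
    prompt="a watercolor fox in a snowy forest",  # placeholder prompt
    height=768,
    width=768,
    guidance_scale=3.5,          # placeholder scale
    num_inference_steps=24,      # placeholder step count
    model="black-forest-labs/FLUX.1-dev",  # assumed base model; app.py passes its own
)
image.save("output.png")  # text_to_image returns a PIL.Image.Image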