salomonsky committed on
Commit
71301fd
verified
1 Parent(s): 0f06ad7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -4
app.py CHANGED
@@ -9,6 +9,8 @@ import random
9
  import numpy as np
10
  import yaml
11
  import traceback
 
 
12
 
13
  try:
14
  with open("config.yaml", "r") as file:
@@ -24,9 +26,25 @@ DATA_PATH.mkdir(exist_ok=True)
24
  PREDEFINED_SEED = random.randint(0, MAX_SEED)
25
  HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
26
 
 
 
 
 
 
 
 
 
 
 
27
  if not HF_TOKEN_UPSCALER:
28
  st.warning("HF_TOKEN_UPSCALER no está configurado. Algunas funcionalidades pueden no funcionar.")
29
 
 
 
 
 
 
 
30
  def get_upscale_finegrain(prompt, img_path, upscale_factor):
31
  try:
32
  upscale_client = InferenceClient("fal/AuraSR-v2", hf_token=HF_TOKEN_UPSCALER)
@@ -127,7 +145,7 @@ def save_prompt(prompt):
127
  st.success("Prompt guardado.")
128
 
129
  async def improve_prompt(prompt):
130
- if not prompt.strip():
131
  return prompt
132
 
133
  try:
@@ -142,7 +160,7 @@ async def improve_prompt(prompt):
142
  "With this inspiration, realize a cinematic txt2img prompt in English with hyperrealistic elements in 200 characters maximum",
143
  "With my idea, make a lifelike and txt2img prompt in English, focusing on photorealistic depth in 200 characters maximum"
144
  ]
145
- response = llm_client.text_generation(
146
  f"{prompt}: {random.choice(instructions)}",
147
  max_new_tokens=256,
148
  temperature=0.7,
@@ -150,7 +168,8 @@ async def improve_prompt(prompt):
150
  repetition_penalty=1.1,
151
  do_sample=True
152
  )
153
- return (response['generated_text'] if isinstance(response, dict) else str(response))[:200].strip() or prompt
 
154
  except Exception as e:
155
  st.error(f"Error al mejorar prompt: {str(e)}\n{traceback.format_exc()}")
156
  return prompt
@@ -166,6 +185,8 @@ async def generate_variations(prompt, num_variants, use_enhanced):
166
  prompts.add(enhanced_prompt)
167
  if len(prompts) == 1 and num_variants > 1:
168
  prompts.add(prompt)
 
 
169
  except Exception as e:
170
  st.error(f"Error generando variaciones: {e}")
171
  return [prompt] * num_variants
@@ -240,7 +261,12 @@ def main():
240
  prompts = [prompt] * num_variants
241
 
242
  if st.sidebar.button("Generar Imagen"):
243
- images = gen(prompts, width, height, model_option, num_variants=num_variants)
 
 
 
 
 
244
 
245
  if generated_image_path and upscale_checkbox:
246
  upscale_factor = st.sidebar.slider("Factor de Escalado", 1, 4, 2)
 
9
  import numpy as np
10
  import yaml
11
  import traceback
12
+ import asyncio
13
+ from transformers import AutoTokenizer, pipeline
14
 
15
  try:
16
  with open("config.yaml", "r") as file:
 
26
  PREDEFINED_SEED = random.randint(0, MAX_SEED)
27
  HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
28
 
29
+ try:
30
+ llm_client = pipeline(
31
+ "text-generation",
32
+ model="mistralai/Mistral-7B-Instruct-v0.1",
33
+ device="cpu"
34
+ )
35
+ except Exception as e:
36
+ st.error(f"Error al cargar el modelo de lenguaje: {e}")
37
+ llm_client = None
38
+
39
  if not HF_TOKEN_UPSCALER:
40
  st.warning("HF_TOKEN_UPSCALER no está configurado. Algunas funcionalidades pueden no funcionar.")
41
 
42
+ def handle_file(file_path):
43
+ if isinstance(file_path, (str, Path)):
44
+ with open(file_path, 'rb') as f:
45
+ return f.read()
46
+ return file_path
47
+
48
  def get_upscale_finegrain(prompt, img_path, upscale_factor):
49
  try:
50
  upscale_client = InferenceClient("fal/AuraSR-v2", hf_token=HF_TOKEN_UPSCALER)
 
145
  st.success("Prompt guardado.")
146
 
147
  async def improve_prompt(prompt):
148
+ if not prompt.strip() or not llm_client:
149
  return prompt
150
 
151
  try:
 
160
  "With this inspiration, realize a cinematic txt2img prompt in English with hyperrealistic elements in 200 characters maximum",
161
  "With my idea, make a lifelike and txt2img prompt in English, focusing on photorealistic depth in 200 characters maximum"
162
  ]
163
+ result = llm_client(
164
  f"{prompt}: {random.choice(instructions)}",
165
  max_new_tokens=256,
166
  temperature=0.7,
 
168
  repetition_penalty=1.1,
169
  do_sample=True
170
  )
171
+ generated_text = result[0]['generated_text'] if isinstance(result, list) else str(result)
172
+ return generated_text[:200].strip() or prompt
173
  except Exception as e:
174
  st.error(f"Error al mejorar prompt: {str(e)}\n{traceback.format_exc()}")
175
  return prompt
 
185
  prompts.add(enhanced_prompt)
186
  if len(prompts) == 1 and num_variants > 1:
187
  prompts.add(prompt)
188
+ if len(prompts) < num_variants:
189
+ prompts.update([prompt] * (num_variants - len(prompts)))
190
  except Exception as e:
191
  st.error(f"Error generando variaciones: {e}")
192
  return [prompt] * num_variants
 
261
  prompts = [prompt] * num_variants
262
 
263
  if st.sidebar.button("Generar Imagen"):
264
+ try:
265
+ images = gen(prompts, width, height, model_option, num_variants=num_variants)
266
+ if not images:
267
+ st.error("No se pudieron generar imágenes")
268
+ except Exception as e:
269
+ st.error(f"Error durante la generación: {e}")
270
 
271
  if generated_image_path and upscale_checkbox:
272
  upscale_factor = st.sidebar.slider("Factor de Escalado", 1, 4, 2)