Vivawaves committed
Commit 42c9772
1 Parent(s): b9b998b

Update app.py
Files changed (1): app.py (+3, -3)
app.py CHANGED

@@ -12,7 +12,7 @@ from previewer.modules import Previewer
 
 from gallery_history import fetch_gallery_history, show_gallery_history
 
-os.environ['TOKENIZERS_PARALLELISM'] = 'true'
+os.environ['TOKENIZERS_PARALLELISM'] = 'false'
 
 DESCRIPTION = "# Waves Weaves"
 DESCRIPTION += "\n<p style=\"text-align: center\"></p>"
@@ -21,7 +21,7 @@ if not torch.cuda.is_available():
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
-MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
 USE_TORCH_COMPILE = False
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 PREVIEW_IMAGES = True
@@ -169,7 +169,7 @@ with gr.Blocks(css="style.css") as demo:
     num_images_per_prompt = gr.Slider(
         label="Number of Images",
         minimum=1,
-        maximum=5,
+        maximum=2,
         step=1,
         value=2,
     )
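As a minimal sketch (not part of the commit itself), the MAX_IMAGE_SIZE line changed here follows the usual env-var-with-default pattern, so the new 1536 default can still be overridden at deploy time; the snippet below only mirrors the two changed settings and is illustrative:

import os

# Sketch only: mirrors the values introduced by this commit.
# Tokenizer parallelism is now disabled (was 'true' before the change).
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Default drops from 2048 to 1536; exporting MAX_IMAGE_SIZE still overrides it.
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))

print(MAX_IMAGE_SIZE)  # 1536 unless MAX_IMAGE_SIZE is set in the environment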