cocktailpeanut committed on
Commit 7d6f42b · 1 Parent(s): f5d52fb
Files changed (1)
  1. app.py +5 -2
app.py CHANGED
@@ -44,11 +44,14 @@ def init():
     # The undistilled model that uses CFG ("pro") which can use negative prompts
     # was not released.
     bfl_repo = "cocktailpeanut/xulf-s"
+    te_repo = "comfyanonymous/flux_text_encoders"
 
     scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler")
-    text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+    #text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+    text_encoder = CLIPTextModel.from_pretrained("./flux_text_encoders/clip_l.safetensors", torch_dtype=dtype)
     tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
-    text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
+    #text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
+    text_encoder_2 = T5EncoderModel.from_pretrained("./flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors", torch_dtype=dtype)
     tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype)
     vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype)
     transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype)
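The new text_encoder and text_encoder_2 lines point at single-file checkpoints under ./flux_text_encoders/, while te_repo names the comfyanonymous/flux_text_encoders repository. A minimal sketch of how those files might be fetched into that local directory with huggingface_hub (an assumption about the surrounding setup, not part of this commit):

from huggingface_hub import hf_hub_download

# Assumed pre-download step (not in the diff): pull the single-file text encoder
# checkpoints referenced above from te_repo into ./flux_text_encoders.
te_repo = "comfyanonymous/flux_text_encoders"
for filename in ("clip_l.safetensors", "t5xxl_fp8_e4m3fn.safetensors"):
    hf_hub_download(repo_id=te_repo, filename=filename, local_dir="./flux_text_encoders")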
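For context, init() loads scheduler, text encoders, tokenizers, VAE, and transformer separately; a hedged sketch of how such components are typically wired together with diffusers, assuming the standard FluxPipeline constructor (the actual assembly in app.py is not shown in this hunk):

from diffusers import FluxPipeline

# Hypothetical assembly of the components loaded in init(); app.py may differ.
pipe = FluxPipeline(
    scheduler=scheduler,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    text_encoder_2=text_encoder_2,
    tokenizer_2=tokenizer_2,
    vae=vae,
    transformer=transformer,
)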