BestWishYsh committed
Commit 0161789
1 Parent(s): 954e9cb

Update app.py

Files changed (1)
  1. app.py +18 -17
app.py CHANGED
@@ -86,17 +86,17 @@ class MagicTimeController:
         # config models
         self.inference_config = OmegaConf.load(inference_config_path)[1]
 
-        # self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
-        # self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
-        # self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
-        # self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
-        # self.text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
+        self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
+        self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
+        self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
+        self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
+        self.text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
 
-        self.tokenizer = tokenizer
-        self.text_encoder = text_encoder
-        self.vae = vae
-        self.unet = unet
-        self.text_model = text_model
+        # self.tokenizer = tokenizer
+        # self.text_encoder = text_encoder
+        # self.vae = vae
+        # self.unet = unet
+        # self.text_model = text_model
 
         self.update_motion_module(self.motion_module_list[0])
         self.update_dreambooth(self.dreambooth_list[0])
@@ -198,13 +198,14 @@ class MagicTimeController:
         }
         return gr.Video(value=save_sample_path), gr.Json(value=json_config)
 
-inference_config = OmegaConf.load(inference_config_path)[1]
-tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
-text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
-vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
-unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs)).cuda()
-text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
-controller = MagicTimeController(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, text_model=text_model)
+# inference_config = OmegaConf.load(inference_config_path)[1]
+# tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
+# text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
+# vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
+# unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs)).cuda()
+# text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
+# controller = MagicTimeController(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, text_model=text_model)
+controller = MagicTimeController()
 
 def ui():
     with gr.Blocks(css=css) as demo:
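
Net effect of the commit: model loading moves from module scope into MagicTimeController.__init__, so the Gradio demo now builds the controller with a bare MagicTimeController() call instead of injecting preloaded modules. A minimal sketch of the resulting pattern is below; the two path variables are hypothetical placeholders (the real values are defined elsewhere in app.py), and UNet3DConditionModel comes from the MagicTime codebase rather than diffusers, so it is omitted here.

    from omegaconf import OmegaConf
    from transformers import CLIPTokenizer, CLIPTextModel
    from diffusers import AutoencoderKL

    inference_config_path = "sample_configs/inference.yaml"  # hypothetical placeholder
    pretrained_model_path = "./models/StableDiffusion"       # hypothetical placeholder

    class MagicTimeController:
        def __init__(self):
            # Indexing with [1] assumes the YAML's top level is a list of
            # configs, matching the diff's OmegaConf.load(...)[1] usage.
            self.inference_config = OmegaConf.load(inference_config_path)[1]
            # The controller is now self-contained: it loads its submodules
            # itself instead of receiving them as keyword arguments.
            self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
            self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
            self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
            self.text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

    controller = MagicTimeController()  # previously: MagicTimeController(tokenizer=..., text_encoder=..., ...)

One consequence worth noting: with the old dependency-injection style the heavyweight modules could be preloaded once, shared, or mocked in tests; the self-loading constructor trades that flexibility for a simpler entry point, and each MagicTimeController instance now loads its own weights.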