Spanicin committed
Commit 60da68b
1 Parent(s): 3830020

Update app.py

Files changed (1)
  1. app.py +58 -84

app.py CHANGED
@@ -317,7 +317,7 @@ def translate_text(text_prompt, target_language):
 def openai_chat_avatar(text_prompt):
     response = client.chat.completions.create(
         model="gpt-4o-mini",
-        messages=[{"role": "system", "content": "Answer using the minimum words you can ever use."},
+        messages=[{"role": "system", "content": "Answer in Portuguese language always using the minimum words you can ever use."},
                   {"role": "user", "content": f"Hi! I need help with something. Can you assist me with the following: {text_prompt}"},
                   ],
         max_tokens = len(text_prompt) + 300 # Use the length of the input text
@@ -381,7 +381,7 @@ def generate_video():
     try:
         if request.method == 'POST':
            # source_image = request.files['source_image']
-           image_path = '/home/user/app/images/vibhu2.jpg'
+           image_path = '/home/user/app/images/marc.png'
            source_image = Image.open(image_path)
            text_prompt = request.form['text_prompt']
 
@@ -508,93 +508,67 @@ def generate_video():
             ref_pose_video.save(ref_pose_video_path)
             print('ref_pose_video_path',ref_pose_video_path)
 
-
-            # Example of using the class with some hypothetical paths
-            args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale,enhancer=enhancer,still=still,preprocess=preprocess,ref_pose_video_path=ref_pose_video_path, image_hardcoded=image_hardcoded)
-
-            if torch.cuda.is_available() and not args.cpu:
-                args.device = "cuda"
-            else:
-                args.device = "cpu"
-
-            generation_thread = threading.Thread(target=main, args=(args,))
-            app.config['generation_thread'] = generation_thread
-            generation_thread.start()
-            # response_data = {"message": "Video generation started",
-            #                  "process_id": generation_thread.ident}
-
-            # return jsonify(response_data)
-
-            while generation_thread.is_alive():
-                if app.config.get('temp_response'):
-                    final_response = app.config['temp_response']
-                    response_data = {
-                        "base64_video": final_response,
-                        "text_prompt": app.config.get('text_prompt'),
-                        "duration": app.config.get('final_video_duration'),
-                        "status": "completed"
-                    }
-                    final_video_path = app.config['final_video_path']
-                    print('final_video_path', final_video_path)
-
-                    if final_video_path and os.path.exists(final_video_path):
-                        os.remove(final_video_path)
-                        print("Deleted video file:", final_video_path)
-
-                    preprocess_dir = os.path.join("/tmp", "preprocess_data")
-                    custom_cleanup(TEMP_DIR.name, preprocess_dir)
-
-                    end_time = time.time()
-                    total_time = round(end_time - start_time, 2)
-                    print("Total time taken for execution:", total_time, " seconds")
-                    response_data["time_taken"] = total_time
-
-                    return jsonify(response_data)
-
     except Exception as e:
         app.logger.error(f"An error occurred: {e}")
         return "An error occurred", 500
+
+    # Example of using the class with some hypothetical paths
+    args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale,enhancer=enhancer,still=still,preprocess=preprocess,ref_pose_video_path=ref_pose_video_path, image_hardcoded=image_hardcoded)
+
+    if torch.cuda.is_available() and not args.cpu:
+        args.device = "cuda"
+    else:
+        args.device = "cpu"
+
+    generation_thread = threading.Thread(target=main, args=(args,))
+    app.config['generation_thread'] = generation_thread
+    generation_thread.start()
+    response_data = {"message": "Video generation started",
+                     "process_id": generation_thread.ident}
 
-# @app.route("/status", methods=["GET"])
-# def check_generation_status():
-#     global TEMP_DIR
-#     global start_time
-#     response = {"base64_video": "","text_prompt":"", "status": ""}
-#     process_id = request.args.get('process_id', None)
-
-#     # process_id is required to check the status for that specific process
-#     if process_id:
-#         generation_thread = app.config.get('generation_thread')
-#         if generation_thread and generation_thread.ident == int(process_id) and generation_thread.is_alive():
-#             return jsonify({"status": "in_progress"}), 200
-#         elif app.config.get('temp_response'):
-#             # app.config['temp_response']['status'] = 'completed'
-#             final_response = app.config['temp_response']
-#             response["base64_video"] = final_response
-#             response["text_prompt"] = app.config.get('text_prompt')
-#             response["duration"] = app.config.get('final_video_duration')
-#             response["status"] = "completed"
-
-#             final_video_path = app.config['final_video_path']
-#             print('final_video_path',final_video_path)
-
-
-#             if final_video_path and os.path.exists(final_video_path):
-#                 os.remove(final_video_path)
-#                 print("Deleted video file:", final_video_path)
-
-#             # TEMP_DIR.cleanup()
-#             preprocess_dir = os.path.join("/tmp", "preprocess_data")
-#             custom_cleanup(TEMP_DIR.name, preprocess_dir)
-
-#             print("Temporary files cleaned up, but preprocess_data is retained.")
+    return jsonify(response_data)
+
+
+@app.route("/status", methods=["GET"])
+def check_generation_status():
+    global TEMP_DIR
+    global start_time
+    response = {"base64_video": "","text_prompt":"", "status": ""}
+    process_id = request.args.get('process_id', None)
+
+    # process_id is required to check the status for that specific process
+    if process_id:
+        generation_thread = app.config.get('generation_thread')
+        if generation_thread and generation_thread.ident == int(process_id) and generation_thread.is_alive():
+            return jsonify({"status": "in_progress"}), 200
+        elif app.config.get('temp_response'):
+            # app.config['temp_response']['status'] = 'completed'
+            final_response = app.config['temp_response']
+            response["base64_video"] = final_response
+            response["text_prompt"] = app.config.get('text_prompt')
+            response["duration"] = app.config.get('final_video_duration')
+            response["status"] = "completed"
+
+            final_video_path = app.config['final_video_path']
+            print('final_video_path',final_video_path)
+
+
+            if final_video_path and os.path.exists(final_video_path):
+                os.remove(final_video_path)
+                print("Deleted video file:", final_video_path)
+
+            # TEMP_DIR.cleanup()
+            preprocess_dir = os.path.join("/tmp", "preprocess_data")
+            custom_cleanup(TEMP_DIR.name, preprocess_dir)
+
+            print("Temporary files cleaned up, but preprocess_data is retained.")
 
-#             end_time = time.time()
-#             total_time = round(end_time - start_time, 2)
-#             print("Total time taken for execution:", total_time, " seconds")
-#             response["time_taken"] = total_time
-#             return jsonify(response)
-#     return jsonify({"error":"No process id provided"})
+            end_time = time.time()
+            total_time = round(end_time - start_time, 2)
+            print("Total time taken for execution:", total_time, " seconds")
+            response["time_taken"] = total_time
+            return jsonify(response)
+    return jsonify({"error":"No process id provided"})
 
 @app.route("/health", methods=["GET"])
 def health_status():
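
With this commit, generate_video no longer blocks until the video is ready: it starts the worker thread, returns a process_id, and clients poll the reinstated /status route for the result. Below is a minimal client-side sketch of that flow. The base URL/port and the "/generate_video" route name are assumptions (only the "/status" and "/health" routes are visible in this diff); the "text_prompt" form field and the "process_id", "status", "duration", and "base64_video" response keys come from the code above.

import time
import requests

BASE_URL = "http://localhost:7860"  # assumed host/port for the Flask app

# Kick off generation; the server now returns immediately with a process_id
# instead of waiting for the background thread to finish. "/generate_video"
# is a hypothetical route name for the generate_video handler.
start = requests.post(f"{BASE_URL}/generate_video",
                      data={"text_prompt": "Hello there"})
process_id = start.json()["process_id"]

# Poll /status until the background thread reports completion.
while True:
    status = requests.get(f"{BASE_URL}/status",
                          params={"process_id": process_id}).json()
    if status.get("status") == "completed":
        print("video duration:", status.get("duration"))
        print("base64 payload size:", len(status.get("base64_video", "")))
        break
    time.sleep(2)  # polling interval chosen arbitrarily for the sketch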