mrfakename committed on
Commit 5975221
1 Parent(s): 79086d9

Sync from GitHub repo


This Space is synced from the GitHub repo: https://github.com/SWivid/F5-TTS. Please submit contributions there.

Files changed (1)
  1. app.py +4 -0
app.py CHANGED
@@ -55,6 +55,7 @@ chat_model_state = None
 chat_tokenizer_state = None
 
 
+@gpu_decorator
 def generate_response(messages, model, tokenizer):
     """Generate response using Qwen"""
     text = tokenizer.apply_chat_template(
@@ -457,6 +458,7 @@ Have a conversation with an AI using your reference voice!
 
     chat_interface_container = gr.Column(visible=False)
 
+    @gpu_decorator
     def load_chat_model():
         global chat_model_state, chat_tokenizer_state
         if chat_model_state is None:
@@ -520,6 +522,7 @@ Have a conversation with an AI using your reference voice!
     )
 
     # Modify process_audio_input to use model and tokenizer from state
+    @gpu_decorator
     def process_audio_input(audio_path, history, conv_state):
         """Handle audio input from user"""
         if not audio_path:
@@ -541,6 +544,7 @@ Have a conversation with an AI using your reference voice!
 
         return history, conv_state, ""
 
+    @gpu_decorator
     def generate_audio_response(history, ref_audio, ref_text, model, remove_silence):
         """Generate TTS audio for AI response"""
         if not history or not ref_audio:
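
For context, the diff assumes a gpu_decorator defined earlier in app.py. In the F5-TTS Space this is typically the ZeroGPU decorator spaces.GPU when running on Hugging Face Spaces and a plain passthrough otherwise; the sketch below illustrates that pattern and is an assumption for illustration, not part of this commit's diff.

    # Hedged sketch of how gpu_decorator is commonly defined near the top of app.py
    # (assumption for illustration; not shown in this commit's diff).
    try:
        import spaces  # available on Hugging Face Spaces with ZeroGPU
        USING_SPACES = True
    except ImportError:
        USING_SPACES = False

    def gpu_decorator(func):
        """On Spaces, wrap func with spaces.GPU so each call requests a GPU worker;
        locally, return the function unchanged."""
        if USING_SPACES:
            return spaces.GPU(func)
        return func

With a definition like this in place, stacking @gpu_decorator on generate_response, load_chat_model, process_audio_input, and generate_audio_response (as this commit does) lets those calls run on a GPU worker when the Space is deployed, while remaining ordinary function calls in a local run.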