yuangongfdu committed on
Commit
bb758f0
1 Parent(s): b8b03ab

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -17,7 +17,7 @@ def round_time_resolution(time_resolution):
 
 def predict(audio_path, time_resolution):
     audio_tagging_time_resolution = round_time_resolution(time_resolution)
-    # result = model.transcribe(audio_path, at_time_res=audio_tagging_time_resolution)
+    result = model.transcribe(audio_path, at_time_res=audio_tagging_time_resolution)
     # # ASR Results
     # print(result["text"])
     # # Audio Tagging Results
@@ -32,7 +32,7 @@ def predict(audio_path, time_resolution):
     # print(segment)
     # at_output = at_output + str(segment['time']['start']) + 's-' + str(segment['time']['end']) + 's: ' + ','.join([x[0] for x in segment['audio tags']]) + '\n'
     # print(at_output)
-    asr_output, at_output = str(audio_tagging_time_resolution), 'k'
+    asr_output, at_output = str(result["text"]), 'k'
     return asr_output, at_output
 
 iface = gr.Interface(fn=predict,
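
For context, the sketch below shows roughly how predict() would read if the commented-out audio-tagging loop from the second hunk were also re-enabled. It is a minimal sketch, not the Space's actual code: it only reuses names that appear in the diff (model.transcribe, result["text"], segment['time'], segment['audio tags'], round_time_resolution), and because the hunk does not show how the tagging segments are extracted from result, the segments variable is a hypothetical placeholder.

# A minimal sketch, assuming `model` is loaded elsewhere in app.py (not shown in the hunk).
def predict(audio_path, time_resolution):
    audio_tagging_time_resolution = round_time_resolution(time_resolution)
    # ASR plus audio tagging at the rounded time resolution -- the call this commit enables.
    result = model.transcribe(audio_path, at_time_res=audio_tagging_time_resolution)

    # Hypothetical placeholder: the diff does not show how tagging segments
    # are derived from `result`; substitute the appropriate step here.
    segments = []

    # Format each segment as "<start>s-<end>s: tag1,tag2,...", mirroring the
    # commented-out line in the second hunk.
    at_output = ''
    for segment in segments:
        at_output += (str(segment['time']['start']) + 's-'
                      + str(segment['time']['end']) + 's: '
                      + ','.join([x[0] for x in segment['audio tags']]) + '\n')

    asr_output = str(result["text"])
    return asr_output, at_output

As at the end of the hunk, gr.Interface(fn=predict, ...) then wires this function into the Gradio UI, surfacing asr_output and at_output as its outputs.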