Update README.md
README.md
CHANGED
@@ -86,7 +86,7 @@ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-Audio", device_map="cuda
 # Specify hyperparameters for generation (No need to do this if you are using transformers>4.32.0)
 # model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-Audio", trust_remote_code=True)
 audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Audio/1272-128104-0000.flac"
-sp_prompt = "<|
+sp_prompt = "<|startoftranscript|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>"
 query = f"<audio>{audio_url}</audio>{sp_prompt}"
 audio_info = tokenizer.process_audio(query)
 inputs = tokenizer(query, return_tensors='pt', audio_info=audio_info)
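For context, the edited line sets the special-token task prompt that tells Qwen-Audio to produce an English transcription without timestamps or inverse text normalization. Below is a minimal end-to-end sketch of how this snippet is used; the model/tokenizer loading and the `generate`/`decode` calls with `audio_info` are assumed from the surrounding README rather than shown in this diff, so treat them as illustrative.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed setup from earlier in the README: both loaded with trust_remote_code=True.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-Audio", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-Audio", device_map="cuda", trust_remote_code=True
).eval()

audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Audio/1272-128104-0000.flac"
# Task prompt from the diff: English ASR, no timestamps, no inverse text normalization.
sp_prompt = "<|startoftranscript|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>"
query = f"<audio>{audio_url}</audio>{sp_prompt}"

# process_audio resolves the <audio> tag in the query into audio features/metadata.
audio_info = tokenizer.process_audio(query)
inputs = tokenizer(query, return_tensors="pt", audio_info=audio_info)
inputs = inputs.to(model.device)

# audio_info is passed through again (assumed, following the README's pattern) so the
# model's custom code can align the audio features with the text tokens.
pred = model.generate(**inputs, audio_info=audio_info)
response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
print(response)
```

Note that swapping the language and task tokens in `sp_prompt` (e.g. a different language tag or a captioning task) changes what the model is asked to produce, while the rest of the pipeline stays the same.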