wasmdashai committed
Commit 2f454a6 · verified · 1 Parent(s): a509b65

Update app.py

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -5,6 +5,9 @@ from VitsModelSplit.vits_model_only_d import Vits_models_only_decoder
 from VitsModelSplit.vits_model import VitsModel
 import gradio as gr
 import os
+from transformers import AutoTokenizer
+tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-ar")
+
 def create_file(file_path):
     # Temporary file path
     #file_path = "DDFGDdd.onnx"
@@ -96,12 +99,14 @@ class OnnxModelConverter:
         model = Vits_models_only_decoder.from_pretrained(model_name, token=token)
         onnx_file = f"/tmp/{onnx_filename}.onnx"
 
-        vocab_size = model.text_encoder.embed_tokens.weight.size(0)
-        example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
+        inputs = tokenizer("السلام عليكم كيف الحال", return_tensors="pt")
+
+        # Trace the decoder part of the model
+        example_inputs = inputs.input_ids.type(torch.LongTensor)
 
         torch.onnx.export(
             model,
-            example_input,
+            example_inputs,
             onnx_file,
             opset_version=11,
             input_names=['input'],
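
For a quick sanity check of the new dummy input, here is a minimal standalone sketch (assuming wasmdashai/vits-ar exposes a regular transformers tokenizer, as the added lines suggest; the shape noted in the comment is illustrative):

    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-ar")
    # Arabic sample sentence: "Peace be upon you, how are you?"
    inputs = tokenizer("السلام عليكم كيف الحال", return_tensors="pt")

    # input_ids is a (batch, sequence_length) tensor of token ids; casting to
    # LongTensor pins the int64 dtype the embedding layer expects while tracing.
    example_inputs = inputs.input_ids.type(torch.LongTensor)
    print(example_inputs.shape, example_inputs.dtype)  # (1, sequence_length), torch.int64

Tracing with tokenizer output rather than torch.randint exercises the exported graph with ids in the same range and dtype the model sees at inference time, which appears to be the motivation for this commit.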
 
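
Pieced together, the conversion path after this commit would look roughly like the sketch below. It is an approximation, not the repo's verbatim code: the enclosing OnnxModelConverter class and its other branches sit outside this hunk, the helper name convert_decoder_to_onnx is invented here, and output_names plus dynamic_axes (which would let the exported graph accept variable-length input instead of being fixed to the traced sentence) do not appear in the diff.

    import torch
    from transformers import AutoTokenizer
    from VitsModelSplit.vits_model_only_d import Vits_models_only_decoder

    tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-ar")

    def convert_decoder_to_onnx(model_name, token, onnx_filename):
        # Load the decoder-only VITS variant and choose a temp path for the export.
        model = Vits_models_only_decoder.from_pretrained(model_name, token=token)
        onnx_file = f"/tmp/{onnx_filename}.onnx"

        # Trace the decoder with real token ids instead of random ones.
        inputs = tokenizer("السلام عليكم كيف الحال", return_tensors="pt")
        example_inputs = inputs.input_ids.type(torch.LongTensor)

        torch.onnx.export(
            model,
            example_inputs,
            onnx_file,
            opset_version=11,
            input_names=['input'],
            output_names=['output'],  # assumed; not visible in the hunk
            dynamic_axes={'input': {0: 'batch', 1: 'sequence'}},  # assumed addition
        )
        return onnx_file

Called as convert_decoder_to_onnx("wasmdashai/vits-ar", hf_token, "vits_ar_decoder") with a valid hf_token, this would write the exported graph to /tmp/vits_ar_decoder.onnx.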