Aitron Emper committed on
Commit 5024722
1 Parent(s): 4583551

Update rvc/lib/tools/model_download.py

Files changed (1)
  1. rvc/lib/tools/model_download.py +361 -363
rvc/lib/tools/model_download.py CHANGED
@@ -1,363 +1,361 @@
- import os
- import sys
- import wget
- import zipfile
- from bs4 import BeautifulSoup
- import requests
- from urllib.parse import unquote, urlencode, parse_qs, urlparse
- import re
- import shutil
- import six
-
-
- def find_folder_parent(search_dir, folder_name):
-     for dirpath, dirnames, _ in os.walk(search_dir):
-         if folder_name in dirnames:
-             return os.path.abspath(dirpath)
-     return None
-
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
-
- from rvc.lib.utils import format_title
-
- from rvc.lib.tools import gdown
-
- file_path = find_folder_parent(now_dir, "logs")
-
- zips_path = os.getcwd() + "/logs/zips"
-
-
- def search_pth_index(folder):
-     pth_paths = [
-         os.path.join(folder, file)
-         for file in os.listdir(folder)
-         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
-     ]
-     index_paths = [
-         os.path.join(folder, file)
-         for file in os.listdir(folder)
-         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
-     ]
-
-     return pth_paths, index_paths
-
-
- def get_mediafire_download_link(url):
-     response = requests.get(url)
-     response.raise_for_status()
-     soup = BeautifulSoup(response.text, "html.parser")
-     download_button = soup.find(
-         "a", {"class": "input popsok", "aria-label": "Download file"}
-     )
-     if download_button:
-         download_link = download_button.get("href")
-         return download_link
-     else:
-         return None
-
-
- def download_from_url(url):
-     os.makedirs(zips_path, exist_ok=True)
-     if url != "":
-         if "drive.google.com" in url:
-             if "file/d/" in url:
-                 file_id = url.split("file/d/")[1].split("/")[0]
-             elif "id=" in url:
-                 file_id = url.split("id=")[1].split("&")[0]
-             else:
-                 return None
-
-             if file_id:
-                 os.chdir(zips_path)
-                 try:
-                     gdown.download(
-                         f"https://drive.google.com/uc?id={file_id}",
-                         quiet=True,
-                         fuzzy=True,
-                     )
-                 except Exception as error:
-                     error_message = str(error)
-                     if (
-                         "Too many users have viewed or downloaded this file recently"
-                         in error_message
-                     ):
-                         os.chdir(now_dir)
-                         return "too much use"
-                     elif (
-                         "Cannot retrieve the public link of the file." in error_message
-                     ):
-                         os.chdir(now_dir)
-                         return "private link"
-                     else:
-                         print(error_message)
-                         os.chdir(now_dir)
-                         return None
-         elif "disk.yandex.ru" in url:
-             base_url = "https://cloud-api.yandex.net/v1/disk/public/resources/download?"
-             public_key = url
-             final_url = base_url + urlencode(dict(public_key=public_key))
-             response = requests.get(final_url)
-             download_url = response.json()["href"]
-             download_response = requests.get(download_url)
-
-             if download_response.status_code == 200:
-                 filename = parse_qs(urlparse(unquote(download_url)).query).get(
-                     "filename", [""]
-                 )[0]
-                 if filename:
-                     os.chdir(zips_path)
-                     with open(filename, "wb") as f:
-                         f.write(download_response.content)
-             else:
-                 print("Failed to get filename from URL.")
-                 return None
-
-         elif "pixeldrain.com" in url:
-             try:
-                 file_id = url.split("pixeldrain.com/u/")[1]
-                 os.chdir(zips_path)
-                 print(file_id)
-                 response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
-                 if response.status_code == 200:
-                     file_name = (
-                         response.headers.get("Content-Disposition")
-                         .split("filename=")[-1]
-                         .strip('";')
-                     )
-                     os.makedirs(zips_path, exist_ok=True)
-                     with open(os.path.join(zips_path, file_name), "wb") as newfile:
-                         newfile.write(response.content)
-                         os.chdir(file_path)
-                         return "downloaded"
-                 else:
-                     os.chdir(file_path)
-                     return None
-             except Exception as e:
-                 print(e)
-                 os.chdir(file_path)
-                 return None
-
-         elif "cdn.discordapp.com" in url:
-             file = requests.get(url)
-             os.chdir(zips_path)
-             if file.status_code == 200:
-                 name = url.split("/")
-                 with open(os.path.join(name[-1]), "wb") as newfile:
-                     newfile.write(file.content)
-             else:
-                 return None
-         elif "/blob/" in url or "/resolve/" in url:
-             os.chdir(zips_path)
-             if "/blob/" in url:
-                 url = url.replace("/blob/", "/resolve/")
-
-             response = requests.get(url, stream=True)
-             if response.status_code == 200:
-                 content_disposition = six.moves.urllib_parse.unquote(
-                     response.headers["Content-Disposition"]
-                 )
-                 m = re.search(r'filename="([^"]+)"', content_disposition)
-                 file_name = m.groups()[0]
-                 file_name = file_name.replace(os.path.sep, "_")
-                 total_size_in_bytes = int(response.headers.get("content-length", 0))
-                 block_size = 1024
-                 progress_bar_length = 50
-                 progress = 0
-
-                 with open(os.path.join(zips_path, file_name), "wb") as file:
-                     for data in response.iter_content(block_size):
-                         file.write(data)
-                         progress += len(data)
-                         progress_percent = int((progress / total_size_in_bytes) * 100)
-                         num_dots = int(
-                             (progress / total_size_in_bytes) * progress_bar_length
-                         )
-                         progress_bar = (
-                             "["
-                             + "." * num_dots
-                             + " " * (progress_bar_length - num_dots)
-                             + "]"
-                         )
-                         print(
-                             f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ",
-                             end="\r",
-                         )
-                         if progress_percent == 100:
-                             print("\n")
-
-             else:
-                 os.chdir(now_dir)
-                 return None
-         elif "/tree/main" in url:
-             os.chdir(zips_path)
-             response = requests.get(url)
-             soup = BeautifulSoup(response.content, "html.parser")
-             temp_url = ""
-             for link in soup.find_all("a", href=True):
-                 if link["href"].endswith(".zip"):
-                     temp_url = link["href"]
-                     break
-             if temp_url:
-                 url = temp_url
-                 url = url.replace("blob", "resolve")
-                 if "huggingface.co" not in url:
-                     url = "https://huggingface.co" + url
-
-                 wget.download(url)
-             else:
-                 os.chdir(now_dir)
-                 return None
-         elif "applio.org" in url:
-             parts = url.split("/")
-             id_with_query = parts[-1]
-             id_parts = id_with_query.split("?")
-             id_number = id_parts[0]
-
-             url = "https://cjtfqzjfdimgpvpwhzlv.supabase.co/rest/v1/models"
-             headers = {
-                 "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImNqdGZxempmZGltZ3B2cHdoemx2Iiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTUxNjczODgsImV4cCI6MjAxMDc0MzM4OH0.7z5WMIbjR99c2Ooc0ma7B_FyGq10G8X-alkCYTkKR10"
-             }
-
-             params = {"id": f"eq.{id_number}"}
-             response = requests.get(url, headers=headers, params=params)
-             if response.status_code == 200:
-                 json_response = response.json()
-                 print(json_response)
-                 if json_response:
-                     link = json_response[0]["link"]
-                     verify = download_from_url(link)
-                     if verify == "downloaded":
-                         return "downloaded"
-                     else:
-                         return None
-             else:
-                 return None
-         else:
-             try:
-                 os.chdir(zips_path)
-                 wget.download(url)
-             except Exception as error:
-                 os.chdir(now_dir)
-                 print(error)
-                 return None
-
-         for currentPath, _, zipFiles in os.walk(zips_path):
-             for Files in zipFiles:
-                 filePart = Files.split(".")
-                 extensionFile = filePart[len(filePart) - 1]
-                 filePart.pop()
-                 nameFile = "_".join(filePart)
-                 realPath = os.path.join(currentPath, Files)
-                 os.rename(realPath, nameFile + "." + extensionFile)
-
-         os.chdir(now_dir)
-         return "downloaded"
-
-     os.chdir(now_dir)
-     return None
-
-
- def extract_and_show_progress(zipfile_path, unzips_path):
-     try:
-         with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
-             for file_info in zip_ref.infolist():
-                 zip_ref.extract(file_info, unzips_path)
-         os.remove(zipfile_path)
-         return True
-     except Exception as error:
-         print(error)
-         return False
-
-
- def unzip_file(zip_path, zip_file_name):
-     zip_file_path = os.path.join(zip_path, zip_file_name + ".zip")
-     extract_path = os.path.join(file_path, zip_file_name)
-     with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
-         zip_ref.extractall(extract_path)
-     os.remove(zip_file_path)
-
-
- def model_download_pipeline(url):
-     verify = download_from_url(url)
-     if verify == "downloaded":
-         extract_folder_path = ""
-         for filename in os.listdir(zips_path):
-             if filename.endswith(".zip"):
-                 zipfile_path = os.path.join(zips_path, filename)
-                 print("Proceeding with the extraction...")
-
-                 model_zip = os.path.basename(zipfile_path)
-                 model_name = format_title(model_zip.split(".zip")[0])
-                 extract_folder_path = os.path.join(
-                     "logs",
-                     os.path.normpath(model_name),
-                 )
-
-                 success = extract_and_show_progress(zipfile_path, extract_folder_path)
-
-                 subfolders = [
-                     f
-                     for f in os.listdir(extract_folder_path)
-                     if os.path.isdir(os.path.join(extract_folder_path, f))
-                 ]
-                 if len(subfolders) == 1:
-                     subfolder_path = os.path.join(extract_folder_path, subfolders[0])
-                     for item in os.listdir(subfolder_path):
-                         s = os.path.join(subfolder_path, item)
-                         d = os.path.join(extract_folder_path, item)
-                         shutil.move(s, d)
-                     os.rmdir(subfolder_path)
-
-                 for item in os.listdir(extract_folder_path):
-                     if ".pth" in item:
-                         file_name = item.split(".pth")[0]
-                         if file_name != model_name:
-                             os.rename(
-                                 os.path.join(extract_folder_path, item),
-                                 os.path.join(extract_folder_path, model_name + ".pth"),
-                             )
-                     else:
-                         if "v2" not in item:
-                             file_name = item.split("_nprobe_1_")[1].split("_v1")[0]
-                             if file_name != model_name:
-                                 new_file_name = (
-                                     item.split("_nprobe_1_")[0]
-                                     + "_nprobe_1_"
-                                     + model_name
-                                     + "_v1"
-                                 )
-                                 os.rename(
-                                     os.path.join(extract_folder_path, item),
-                                     os.path.join(
-                                         extract_folder_path, new_file_name + ".index"
-                                     ),
-                                 )
-                         else:
-                             file_name = item.split("_nprobe_1_")[1].split("_v2")[0]
-                             if file_name != model_name:
-                                 new_file_name = (
-                                     item.split("_nprobe_1_")[0]
-                                     + "_nprobe_1_"
-                                     + model_name
-                                     + "_v2"
-                                 )
-                                 os.rename(
-                                     os.path.join(extract_folder_path, item),
-                                     os.path.join(
-                                         extract_folder_path, new_file_name + ".index"
-                                     ),
-                                 )
-
-                 if success:
-                     print(f"Model {model_name} downloaded!")
-                 else:
-                     print(f"Error downloading {model_name}")
-                     sys.exit()
-         if extract_folder_path == "":
-             print("Zip file was not found.")
-             sys.exit()
-         result = search_pth_index(extract_folder_path)
-     else:
-         message = "Error"
 
+ import os
+ import sys
+ import wget
+ import zipfile
+ from bs4 import BeautifulSoup
+ import requests
+ from urllib.parse import unquote, urlencode, parse_qs, urlparse
+ import re
+ import shutil
+ import six
+
+
+ def find_folder_parent(search_dir, folder_name):
+     for dirpath, dirnames, _ in os.walk(search_dir):
+         if folder_name in dirnames:
+             return os.path.abspath(dirpath)
+     return None
+
+
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+
+ from rvc.lib.utils import format_title
+
+ from rvc.lib.tools import gdown
+
+ file_path = find_folder_parent(now_dir, "logs")
+
+ zips_path = os.getcwd() + "/logs/zips"
+
+
+ def search_pth_index(folder):
+     pth_paths = [
+         os.path.join(folder, file)
+         for file in os.listdir(folder)
+         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
+     ]
+     index_paths = [
+         os.path.join(folder, file)
+         for file in os.listdir(folder)
+         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
+     ]
+
+     return pth_paths, index_paths
+
+
+ def get_mediafire_download_link(url):
+     response = requests.get(url)
+     response.raise_for_status()
+     soup = BeautifulSoup(response.text, "html.parser")
+     download_button = soup.find(
+         "a", {"class": "input popsok", "aria-label": "Download file"}
+     )
+     if download_button:
+         download_link = download_button.get("href")
+         return download_link
+     else:
+         return None
+
+
+ def download_from_url(url):
+     os.makedirs(zips_path, exist_ok=True)
+     if url != "":
+         if "drive.google.com" in url:
+             if "file/d/" in url:
+                 file_id = url.split("file/d/")[1].split("/")[0]
+             elif "id=" in url:
+                 file_id = url.split("id=")[1].split("&")[0]
+             else:
+                 return None
+
+             if file_id:
+                 os.chdir(zips_path)
+                 try:
+                     gdown.download(
+                         f"https://drive.google.com/uc?id={file_id}",
+                         quiet=True,
+                         fuzzy=True,
+                     )
+                 except Exception as error:
+                     error_message = str(error)
+                     if (
+                         "Too many users have viewed or downloaded this file recently"
+                         in error_message
+                     ):
+                         os.chdir(now_dir)
+                         return "too much use"
+                     elif (
+                         "Cannot retrieve the public link of the file." in error_message
+                     ):
+                         os.chdir(now_dir)
+                         return "private link"
+                     else:
+                         print(error_message)
+                         os.chdir(now_dir)
+                         return None
+         elif "disk.yandex.ru" in url:
+             base_url = "https://cloud-api.yandex.net/v1/disk/public/resources/download?"
+             public_key = url
+             final_url = base_url + urlencode(dict(public_key=public_key))
+             response = requests.get(final_url)
+             download_url = response.json()["href"]
+             download_response = requests.get(download_url)
+
+             if download_response.status_code == 200:
+                 filename = parse_qs(urlparse(unquote(download_url)).query).get(
+                     "filename", [""]
+                 )[0]
+                 if filename:
+                     os.chdir(zips_path)
+                     with open(filename, "wb") as f:
+                         f.write(download_response.content)
+             else:
+                 print("Failed to get filename from URL.")
+                 return None
+
+         elif "pixeldrain.com" in url:
+             try:
+                 file_id = url.split("pixeldrain.com/u/")[1]
+                 os.chdir(zips_path)
+                 print(file_id)
+                 response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
+                 if response.status_code == 200:
+                     file_name = (
+                         response.headers.get("Content-Disposition")
+                         .split("filename=")[-1]
+                         .strip('";')
+                     )
+                     os.makedirs(zips_path, exist_ok=True)
+                     with open(os.path.join(zips_path, file_name), "wb") as newfile:
+                         newfile.write(response.content)
+                         os.chdir(file_path)
+                         return "downloaded"
+                 else:
+                     os.chdir(file_path)
+                     return None
+             except Exception as e:
+                 print(e)
+                 os.chdir(file_path)
+                 return None
+
+         elif "cdn.discordapp.com" in url:
+             file = requests.get(url)
+             os.chdir(zips_path)
+             if file.status_code == 200:
+                 name = url.split("/")
+                 with open(os.path.join(name[-1]), "wb") as newfile:
+                     newfile.write(file.content)
+             else:
+                 return None
+         elif "/blob/" in url or "/resolve/" in url:
+             os.chdir(zips_path)
+             if "/blob/" in url:
+                 url = url.replace("/blob/", "/resolve/")
+
+             response = requests.get(url, stream=True)
+             if response.status_code == 200:
+                 content_disposition = six.moves.urllib_parse.unquote(
+                     response.headers["Content-Disposition"]
+                 )
+                 m = re.search(r'filename="([^"]+)"', content_disposition)
+                 file_name = m.groups()[0]
+                 file_name = file_name.replace(os.path.sep, "_")
+                 total_size_in_bytes = int(response.headers.get("content-length", 0))
+                 block_size = 1024
+                 progress_bar_length = 50
+                 progress = 0
+
+                 with open(os.path.join(zips_path, file_name), "wb") as file:
+                     for data in response.iter_content(block_size):
+                         file.write(data)
+                         progress += len(data)
+                         progress_percent = int((progress / total_size_in_bytes) * 100)
+                         num_dots = int(
+                             (progress / total_size_in_bytes) * progress_bar_length
+                         )
+                         progress_bar = (
+                             "["
+                             + "." * num_dots
+                             + " " * (progress_bar_length - num_dots)
+                             + "]"
+                         )
+                         print(
+                             f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ",
+                             end="\r",
+                         )
+                         if progress_percent == 100:
+                             print("\n")
+
+             else:
+                 os.chdir(now_dir)
+                 return None
+         elif "/tree/main" in url:
+             os.chdir(zips_path)
+             response = requests.get(url)
+             soup = BeautifulSoup(response.content, "html.parser")
+             temp_url = ""
+             for link in soup.find_all("a", href=True):
+                 if link["href"].endswith(".zip"):
+                     temp_url = link["href"]
+                     break
+             if temp_url:
+                 url = temp_url
+                 url = url.replace("blob", "resolve")
+                 if "huggingface.co" not in url:
+                     url = "https://huggingface.co" + url
+
+                 wget.download(url)
+             else:
+                 os.chdir(now_dir)
+                 return None
+         elif "applio.org" in url:
+             parts = url.split("/")
+             id_with_query = parts[-1]
+             id_parts = id_with_query.split("?")
+             id_number = id_parts[0]
+
+             url = "https://cjtfqzjfdimgpvpwhzlv.supabase.co/rest/v1/models"
+             headers = {
+                 "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImNqdGZxempmZGltZ3B2cHdoemx2Iiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTUxNjczODgsImV4cCI6MjAxMDc0MzM4OH0.7z5WMIbjR99c2Ooc0ma7B_FyGq10G8X-alkCYTkKR10"
+             }
+
+             params = {"id": f"eq.{id_number}"}
+             response = requests.get(url, headers=headers, params=params)
+             if response.status_code == 200:
+                 json_response = response.json()
+                 print(json_response)
+                 if json_response:
+                     link = json_response[0]["link"]
+                     verify = download_from_url(link)
+                     if verify == "downloaded":
+                         return "downloaded"
+                     else:
+                         return None
+             else:
+                 return None
+         else:
+             try:
+                 os.chdir(zips_path)
+                 wget.download(url)
+             except Exception as error:
+                 os.chdir(now_dir)
+                 print(error)
+                 return None
+
+         for currentPath, _, zipFiles in os.walk(zips_path):
+             for Files in zipFiles:
+                 filePart = Files.split(".")
+                 extensionFile = filePart[len(filePart) - 1]
+                 filePart.pop()
+                 nameFile = "_".join(filePart)
+                 realPath = os.path.join(currentPath, Files)
+                 os.rename(realPath, nameFile + "." + extensionFile)
+
+         os.chdir(now_dir)
+         return "downloaded"
+
+     os.chdir(now_dir)
+     return None
+
+
+ def extract_and_show_progress(zipfile_path, unzips_path):
+     try:
+         with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
+             for file_info in zip_ref.infolist():
+                 zip_ref.extract(file_info, unzips_path)
+         os.remove(zipfile_path)
+         return True
+     except Exception as error:
+         print(error)
+         return False
+
+
+ def unzip_file(zip_path, zip_file_name):
+     zip_file_path = os.path.join(zip_path, zip_file_name + ".zip")
+     extract_path = os.path.join(file_path, zip_file_name)
+     with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
+         zip_ref.extractall(extract_path)
+     os.remove(zip_file_path)
+
+
+ def model_download_pipeline(url):
+     verify = download_from_url(url)
+     if verify == "downloaded":
+         extract_folder_path = ""
+         for filename in os.listdir(zips_path):
+             if filename.endswith(".zip"):
+                 zipfile_path = os.path.join(zips_path, filename)
+                 print("Proceeding with the extraction...")
+
+                 model_zip = os.path.basename(zipfile_path)
+                 model_name = format_title(model_zip.split(".zip")[0])
+                 extract_folder_path = os.path.join(
+                     "logs",
+                     os.path.normpath(model_name),
+                 )
+
+                 success = extract_and_show_progress(zipfile_path, extract_folder_path)
+
+                 subfolders = [
+                     f
+                     for f in os.listdir(extract_folder_path)
+                     if os.path.isdir(os.path.join(extract_folder_path, f))
+                 ]
+                 if len(subfolders) == 1:
+                     subfolder_path = os.path.join(extract_folder_path, subfolders[0])
+                     for item in os.listdir(subfolder_path):
+                         s = os.path.join(subfolder_path, item)
+                         d = os.path.join(extract_folder_path, item)
+                         shutil.move(s, d)
+                     os.rmdir(subfolder_path)
+
+                 for item in os.listdir(extract_folder_path):
+                     if ".pth" in item:
+                         file_name = item.split(".pth")[0]
+                         if file_name != model_name:
+                             os.rename(
+                                 os.path.join(extract_folder_path, item),
+                                 os.path.join(extract_folder_path, model_name + ".pth"),
+                             )
+                     else:
+                         if "v2" not in item:
+                             file_name = item.split("_nprobe_1_")[1].split("_v1")[0]
+                             if file_name != model_name:
+                                 new_file_name = (
+                                     item.split("_nprobe_1_")[0]
+                                     + "_nprobe_1_"
+                                     + model_name
+                                     + "_v1"
+                                 )
+                                 os.rename(
+                                     os.path.join(extract_folder_path, item),
+                                     os.path.join(
+                                         extract_folder_path, new_file_name + ".index"
+                                     ),
+                                 )
+                         else:
+                             file_name = item.split("_nprobe_1_")[1].split("_v2")[0]
+                             if file_name != model_name:
+                                 new_file_name = (
+                                     item.split("_nprobe_1_")[0]
+                                     + "_nprobe_1_"
+                                     + model_name
+                                     + "_v2"
+                                 )
+                                 os.rename(
+                                     os.path.join(extract_folder_path, item),
+                                     os.path.join(
+                                         extract_folder_path, new_file_name + ".index"
+                                     ),
+                                 )
+
+                 if success:
+                     print(f"Model {model_name} downloaded!")
+                 else:
+                     print(f"Error downloading {model_name}")
+         if extract_folder_path == "":
+             print("Zip file was not found.")
+         result = search_pth_index(extract_folder_path)
+     else:
+         message = "Error"
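
The substantive change in this commit is the removal of the two sys.exit() calls in model_download_pipeline, so a failed extraction or a missing zip no longer terminates the whole interpreter. A minimal caller sketch under stated assumptions: it assumes the repository root is the working directory and that the module is importable as rvc.lib.tools.model_download; the URL is a hypothetical example, and since this revision assigns result = search_pth_index(...) without returning it, the sketch treats the call as a side-effecting step rather than reading a return value.

# Minimal usage sketch (assumptions: run from the repo root; URL is hypothetical).
from rvc.lib.tools.model_download import model_download_pipeline

try:
    # After this commit, error paths print and return instead of calling
    # sys.exit(), so the caller survives a failed download and can react here.
    model_download_pipeline("https://huggingface.co/someuser/somemodel/resolve/main/model.zip")
except Exception as error:
    # e.g. search_pth_index("") when no zip was found raises FileNotFoundError
    print(f"Model download failed: {error}")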