Blane187 committed on
Commit
31788db
1 Parent(s): 51d4d2f

Delete config.py

Files changed (1)
  1. config.py +0 -184
config.py DELETED
@@ -1,184 +0,0 @@
- import argparse
- import sys
- import torch
- import json
- from multiprocessing import cpu_count
-
- usefp16 = False
-
-
- def use_fp32_config():
-     global usefp16
-     usefp16 = False
-     device_capability = 0
-     if torch.cuda.is_available():
-         device = torch.device("cuda:0")  # Assuming you have only one GPU (index 0).
-         device_capability = torch.cuda.get_device_capability(device)[0]
-         # fp16 is only worthwhile on compute capability 7.0+ (Tensor Cores).
-         usefp16 = device_capability >= 7
-         # Mirror the decision into every training config file.
-         for config_file in ["32k.json", "40k.json", "48k.json"]:
-             with open(f"configs/{config_file}", "r") as f:
-                 data = json.load(f)
-
-             if "train" in data and "fp16_run" in data["train"]:
-                 data["train"]["fp16_run"] = usefp16
-
-             with open(f"configs/{config_file}", "w") as f:
-                 json.dump(data, f, indent=4)
-
-             print(f"Set fp16_run to {str(usefp16).lower()} in {config_file}")
-     else:
-         print(
-             "CUDA is not available. Make sure you have an NVIDIA GPU and CUDA installed."
-         )
-     return (usefp16, device_capability)
-
-
- class Config:
-     def __init__(self):
-         self.device = "cuda:0"
-         self.is_half = True
-         self.n_cpu = 0
-         self.gpu_name = None
-         self.gpu_mem = None
-         (
-             self.python_cmd,
-             self.listen_port,
-             self.iscolab,
-             self.noparallel,
-             self.noautoopen,
-             self.paperspace,
-             self.is_cli,
-         ) = self.arg_parse()
-
-         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
-
-     @staticmethod
-     def arg_parse() -> tuple:
-         exe = sys.executable or "python"
-         parser = argparse.ArgumentParser()
-         parser.add_argument("--port", type=int, default=7865, help="Listen port")
-         parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
-         parser.add_argument("--colab", action="store_true", help="Launch in Colab")
-         parser.add_argument(
-             "--noparallel", action="store_true", help="Disable parallel processing"
-         )
-         parser.add_argument(
-             "--noautoopen",
-             action="store_true",
-             help="Do not open in browser automatically",
-         )
-         parser.add_argument(  # Fork feature: Paperspace integration for the web UI.
-             "--paperspace",
-             action="store_true",
-             help="Share a Gradio link for the web UI, so it can also be used on non-local CLI systems.",
-         )
-         parser.add_argument(  # Fork feature: embed a CLI in infer-web.py.
-             "--is_cli",
-             action="store_true",
-             help="Launch an RVC text interface instead of the Gradio UI, exposing the functions of infer-web.py on the command line.",
-         )
-         cmd_opts = parser.parse_args()
-
-         # Fall back to the default port if the requested one is out of range.
-         cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
-
-         return (
-             cmd_opts.pycmd,
-             cmd_opts.port,
-             cmd_opts.colab,
-             cmd_opts.noparallel,
-             cmd_opts.noautoopen,
-             cmd_opts.paperspace,
-             cmd_opts.is_cli,
-         )
-
-     # torch.backends.mps is only available in nightly PyTorch (for now) and on macOS 12.3+,
-     # so probe it with getattr and a test tensor for compatibility.
-     @staticmethod
-     def has_mps() -> bool:
-         if not getattr(torch.backends, "mps", None):
-             return False
-         if not torch.backends.mps.is_available():
-             return False
-         try:
-             torch.zeros(1).to(torch.device("mps"))
-             return True
-         except Exception:
-             return False
-
-     def device_config(self) -> tuple:
-         if torch.cuda.is_available():
-             i_device = int(self.device.split(":")[-1])
-             self.gpu_name = torch.cuda.get_device_name(i_device)
-             # 16-series, 10-series and P40 cards handle fp16 poorly, so force fp32.
-             if (
-                 ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
-                 or "P40" in self.gpu_name.upper()
-                 or "1060" in self.gpu_name
-                 or "1070" in self.gpu_name
-                 or "1080" in self.gpu_name
-             ):
-                 print(f"Found GPU {self.gpu_name}, forcing fp32")
-                 self.is_half = False
-             else:
-                 print(f"Found GPU {self.gpu_name}")
-                 use_fp32_config()
-             # Total VRAM in GiB, rounded to the nearest whole number.
-             self.gpu_mem = int(
-                 torch.cuda.get_device_properties(i_device).total_memory
-                 / 1024
-                 / 1024
-                 / 1024
-                 + 0.4
-             )
-             if self.gpu_mem <= 4:
-                 # Shrink the preprocessing chunk length for cards with <= 4 GB of VRAM.
-                 with open("trainset_preprocess_pipeline_print.py", "r") as f:
-                     strr = f.read().replace("3.7", "3.0")
-                 with open("trainset_preprocess_pipeline_print.py", "w") as f:
-                     f.write(strr)
-         elif self.has_mps():
-             print("No supported NVIDIA GPU found, using MPS instead")
-             self.device = "mps"
-             self.is_half = False
-             use_fp32_config()
-         else:
-             print("No supported NVIDIA GPU found, using CPU instead")
-             self.device = "cpu"
-             self.is_half = False
-             use_fp32_config()
-
-         if self.n_cpu == 0:
-             self.n_cpu = cpu_count()
-
-         if self.is_half:
-             # Preset for ~6 GB of VRAM
-             x_pad = 3
-             x_query = 10
-             x_center = 60
-             x_max = 65
-         else:
-             # Preset for ~5 GB of VRAM
-             x_pad = 1
-             x_query = 6
-             x_center = 38
-             x_max = 41
-
-         if self.gpu_mem is not None and self.gpu_mem <= 4:
-             x_pad = 1
-             x_query = 5
-             x_center = 30
-             x_max = 32
-
-         return x_pad, x_query, x_center, x_max
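
For reference, the deleted Config class was used as a simple singleton. A minimal, hypothetical usage sketch follows: Config and the attributes it exposes come from the file above, while the surrounding script is illustrative only.

# Hypothetical sketch; assumes the deleted config.py is on the import path.
from config import Config

config = Config()  # parses CLI flags and probes the hardware once
print(f"device={config.device}, half precision={config.is_half}")
print(f"inference windows: pad={config.x_pad}, query={config.x_query}, center={config.x_center}, max={config.x_max}")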