|
|
|
|
|
|
|
|
|
import os, pdb |
|
import numpy as np |
|
import torch |
|
|
|
|
|
def mkdir_for(file_path):
    """Create the parent directory of *file_path* if it does not exist.

    A bare filename with no directory component is a no-op; the original
    ``os.makedirs(os.path.split(file_path)[0])`` would raise
    FileNotFoundError on the empty dirname in that case.
    """
    dir_name = os.path.split(file_path)[0]
    if dir_name:  # guard: os.makedirs("") raises FileNotFoundError
        os.makedirs(dir_name, exist_ok=True)
|
|
|
|
|
def model_size(model):
    """Return the total number of parameters of *model* as a plain int.

    Counts every entry in ``model.state_dict()`` (parameters and
    buffers) via ``Tensor.numel()``. The original ``np.prod(shape)``
    version returned the float 1.0 for zero-dim (scalar) entries —
    ``np.prod(())`` is 1.0 — silently promoting the running total to a
    float, and returned a numpy scalar rather than an int.
    """
    return sum(int(weights.numel()) for weights in model.state_dict().values())
|
|
|
|
|
def torch_set_gpu(gpus):
    """Select the GPUs to run on by setting CUDA_VISIBLE_DEVICES.

    Parameters
    ----------
    gpus : int or list of int
        GPU indices to expose. Any negative index selects CPU mode.

    Returns
    -------
    bool
        True when launching on GPU(s), False when launching on CPU.

    Raises
    ------
    AssertionError
        If GPUs were requested but ``torch.cuda.is_available()`` is False.
    """
    if isinstance(gpus, int):
        gpus = [gpus]

    # Any negative index means "run on CPU".
    cuda = all(gpu >= 0 for gpu in gpus)

    if cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in gpus)
        # HOSTNAME is not guaranteed to be set (e.g. non-login shells or
        # containers); fall back to a placeholder instead of letting a
        # KeyError mask the real "GPU unavailable" message.
        host = os.environ.get("HOSTNAME", "unknown-host")
        assert torch.cuda.is_available(), "%s has GPUs %s unavailable" % (
            host,
            os.environ["CUDA_VISIBLE_DEVICES"],
        )
        torch.backends.cudnn.benchmark = True
        # NOTE(review): 'fastest' is not a documented torch.backends.cudnn
        # flag in current PyTorch; kept for backward compatibility — confirm
        # it is still intended.
        torch.backends.cudnn.fastest = True
        print("Launching on GPUs " + os.environ["CUDA_VISIBLE_DEVICES"])
    else:
        print("Launching on CPU")

    return cuda
|
|