fix
- Image/AlexNet/code/train.py +6 -4
- Image/DenseNet/code/train.py +5 -3
- Image/EfficientNet/code/train.py +5 -3
- Image/GoogLeNet/code/train.py +5 -3
- Image/LeNet5/code/train.py +5 -3
- Image/MobileNetv1/code/train.py +5 -3
- Image/MobileNetv2/code/train.py +5 -3
- Image/MobileNetv3/code/train.py +5 -3
- Image/ResNet/code/train.py +5 -3
- Image/SENet/code/train.py +5 -3
- Image/ShuffleNet/code/train.py +5 -3
- Image/ShuffleNetv2/code/train.py +5 -3
- Image/SwinTransformer/code/train.py +5 -3
- Image/VGG/code/train.py +5 -3
- Image/ViT/code/train.py +5 -3
- Image/ZFNet/code/train.py +5 -3
- Image/run_all_models.py +59 -0
- Image/utils/parse_args.py +1 -0
- Image/utils/train_utils.py +27 -23
- README.md +57 -7
- count.py +10 -0
- model_filter.py +168 -0
- models.json +368 -0
Image/AlexNet/code/train.py
CHANGED
@@ -7,14 +7,14 @@ from utils.parse_args import parse_args
 from model import AlexNet
 #args.train_type #0 for normal train, 1 for data aug train, 2 for back door train
 
-def main(train_type):
+def main():
     # parse command-line arguments
     args = parse_args()
     # create the model
     model = AlexNet()
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -29,11 +29,13 @@ def main(train_type):
     elif args.train_type == '1':
         train_model_data_augmentation(model, epochs=args.epochs, lr=args.lr, device=f'cuda:{args.gpu}',
                                       save_dir='../model', model_name='alexnet',
-                                      batch_size=args.batch_size, num_workers=args.num_workers)
+                                      batch_size=args.batch_size, num_workers=args.num_workers,
+                                      local_dataset_path=args.dataset_path)
     elif args.train_type == '2':
         train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=args.epochs, lr=args.lr,
                              device=f'cuda:{args.gpu}', save_dir='../model', model_name='alexnet',
-                             batch_size=args.batch_size, num_workers=args.num_workers)
+                             batch_size=args.batch_size, num_workers=args.num_workers,
+                             local_dataset_path=args.dataset_path)
 
 if __name__ == '__main__':
     main()
Image/DenseNet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='densenet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='densenet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/EfficientNet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='efficientnet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='efficientnet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/GoogLeNet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='googlenet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='googlenet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/LeNet5/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='lenet5',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='lenet5',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/MobileNetv1/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='mobilenetv1',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='mobilenetv1',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/MobileNetv2/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='mobilenetv2',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='mobilenetv2',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/MobileNetv3/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='mobilenetv3',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='mobilenetv3',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/ResNet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='resnet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='resnet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/SENet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='senet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='senet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/ShuffleNet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='shufflenet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='shufflenet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/ShuffleNetv2/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='shufflenetv2',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='shufflenetv2',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/SwinTransformer/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='swintransformer',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='swintransformer',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/VGG/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='vgg',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='vgg',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/ViT/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='vit',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='vit',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
        )
 
 if __name__ == '__main__':
Image/ZFNet/code/train.py
CHANGED
@@ -15,7 +15,7 @@ def main():
 
     if args.train_type == '0':
         # get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)
+        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
         # train the model
         train_model(
             model=model,
@@ -37,7 +37,8 @@ def main():
             save_dir='../model',
             model_name='zfnet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
     elif args.train_type == '2':
         train_model_backdoor(
@@ -50,7 +51,8 @@ def main():
             save_dir='../model',
             model_name='zfnet',
             batch_size=args.batch_size,
-            num_workers=args.num_workers
+            num_workers=args.num_workers,
+            local_dataset_path=args.dataset_path
         )
 
 if __name__ == '__main__':
Image/run_all_models.py
ADDED
import os
import subprocess
from pathlib import Path

# list of models
models = [
    'AlexNet', 'DenseNet', 'EfficientNet', 'GoogLeNet', 'LeNet5',
    'MobileNetv1', 'MobileNetv2', 'MobileNetv3', 'ResNet', 'SENet',
    'ShuffleNet', 'ShuffleNetv2', 'SwinTransformer', 'VGG', 'ViT', 'ZFNet'
]


def run_training(model_name, train_type, script_dir):
    """Run training for the given model."""
    model_code_dir = os.path.join(script_dir, model_name, 'code')
    train_script = os.path.join(model_code_dir, 'train.py')
    dataset_path = os.path.join(script_dir, 'AlexNet', 'dataset')

    if not os.path.exists(train_script):
        print(f"Warning: {train_script} does not exist, skipping")
        return

    # switch into the model's code directory
    os.chdir(model_code_dir)

    cmd = [
        'python', 'train.py',  # use a relative path
        '--train-type', train_type,
        '--dataset-path', dataset_path,  # keep dataset_path as an absolute path
        '--gpu', '1'
    ]

    print(f"\nStarting training for {model_name} (train_type={train_type})")
    print(f"Working directory: {os.getcwd()}")
    print(f"Running command: {' '.join(cmd)}")

    try:
        subprocess.run(cmd, check=True)
        print(f"{model_name} (train_type={train_type}) training finished")
    except subprocess.CalledProcessError as e:
        print(f"Error: {model_name} (train_type={train_type}) training failed")
        print(f"Error message: {str(e)}")

def main():
    # absolute path of the directory containing this script
    script_dir = os.path.dirname(os.path.abspath(__file__))
    original_dir = os.getcwd()  # remember the original working directory

    try:
        # iterate over every model and training type
        for model in models:
            for train_type in ['0', '1', '2']:
                run_training(model, train_type, script_dir)
    finally:
        # restore the original working directory
        os.chdir(original_dir)

if __name__ == '__main__':
    main()
Image/utils/parse_args.py
CHANGED
@@ -15,4 +15,5 @@ def parse_args():
     parser.add_argument('--poison-ratio', type=float, default=0.1, help='ratio of poisoned samples')
     parser.add_argument('--target-label', type=int, default=0, help='target class')
     parser.add_argument('--train-type', type=str, choices=['0', '1', '2'], default='0', help='training type: 0 for normal train, 1 for data aug train, 2 for back door train')
+    parser.add_argument('--dataset-path', type=str, default=None, help='local dataset path; the dataset is downloaded automatically if not specified')
     return parser.parse_args()
Image/utils/train_utils.py
CHANGED
@@ -260,23 +260,25 @@ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda
             logger.info(f'Epoch: {epoch+1} | Test Loss: {test_loss/(batch_idx+1):.3f} | '
                         f'Test Acc: {acc:.2f}%')
 
-        # create the per-epoch save directory
-        epoch_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
-        if not os.path.exists(epoch_dir):
-            os.makedirs(epoch_dir)
-
-        # save the model weights
-        model_path = os.path.join(epoch_dir, 'subject_model.pth')
-        torch.save(model.state_dict(), model_path)
-
-        # collect and save the embeddings
-        embeddings, indices = collect_embeddings(model, trainloader, device)
-        # save the embeddings
-        np.save(os.path.join(epoch_dir, 'train_data.npy'), embeddings)
-
-        # save the index info - only the list of data point indices
-        with open(os.path.join(epoch_dir, 'index.json'), 'w') as f:
-            json.dump(indices, f)
+        # save once every 5 epochs
+        if (epoch + 1) % 5 == 0:
+            # create the per-epoch save directory
+            epoch_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
+            if not os.path.exists(epoch_dir):
+                os.makedirs(epoch_dir)
+
+            # save the model weights
+            model_path = os.path.join(epoch_dir, 'subject_model.pth')
+            torch.save(model.state_dict(), model_path)
+
+            # collect and save the embeddings
+            embeddings, indices = collect_embeddings(model, trainloader, device)
+            # save the embeddings
+            np.save(os.path.join(epoch_dir, 'train_data.npy'), embeddings)
+
+            # save the index info - only the list of data point indices
+            with open(os.path.join(epoch_dir, 'index.json'), 'w') as f:
+                json.dump(indices, f)
 
         # if this is the best accuracy, save an extra copy
         if acc > best_acc:
@@ -297,8 +299,8 @@ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda
     logger.info(f'Best test accuracy: {best_acc:.2f}%')
 
 def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
-                                  save_dir='../model', model_name='model',
-                                  batch_size=128, num_workers=2):
+                                  save_dir='../model', model_name='model',
+                                  batch_size=128, num_workers=2, local_dataset_path=None):
     """Train the model with data augmentation
 
     Data augmentation scheme:
@@ -318,6 +320,7 @@ def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
         model_name: model name
        batch_size: batch size
         num_workers: number of data-loading worker processes
+        local_dataset_path: local dataset path
     """
     import torchvision.transforms as transforms
     from .dataset_utils import get_cifar10_dataloaders
@@ -340,7 +343,7 @@ def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
     ])
 
     # get the data loaders
-    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers)
+    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)
 
     # use the augmented training data
     trainset = trainloader.dataset
@@ -352,9 +355,9 @@ def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
     train_model(model, trainloader, testloader, epochs, lr, device, save_dir, model_name, save_type='1')
 
 def train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr=0.1,
-                         device='cuda:0', save_dir='../model', model_name='model',
-                         batch_size=128, num_workers=2):
-    """
+                         device='cuda:0', save_dir='../model', model_name='model',
+                         batch_size=128, num_workers=2, local_dataset_path=None):
+    """Train a model with a backdoor
 
     Backdoor attack scheme:
     1. Label-flipping attack: change the labels of a selected fraction of samples to the target label
@@ -374,13 +377,14 @@ def train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr
         model_name: model name
         batch_size: batch size
         num_workers: number of data-loading worker processes
+        local_dataset_path: local dataset path
     """
     from .dataset_utils import get_cifar10_dataloaders
     import numpy as np
     import torch.nn.functional as F
 
     # get the original data loaders
-    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers)
+    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)
 
     # modify the labels of part of the training data and add the trigger
     trainset = trainloader.dataset
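Note: the commit threads local_dataset_path through to get_cifar10_dataloaders, but Image/utils/dataset_utils.py itself is not part of this diff. The following is only a minimal sketch of what the updated loader could look like, assuming torchvision's CIFAR-10 and that local_dataset_path points at a directory already containing the extracted dataset; everything except the function name and its three parameters is an assumption, not the repository's actual code.

# Hypothetical sketch of Image/utils/dataset_utils.py (not part of this commit).
import torch
import torchvision
import torchvision.transforms as transforms

def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None):
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
    ])
    # If a local path is given, read the dataset from there and skip the download;
    # otherwise fall back to downloading into ./data.
    root = local_dataset_path if local_dataset_path is not None else './data'
    download = local_dataset_path is None
    trainset = torchvision.datasets.CIFAR10(root=root, train=True, download=download, transform=transform)
    testset = torchvision.datasets.CIFAR10(root=root, train=False, download=download, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return trainloader, testloader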
README.md
CHANGED
@@ -3,14 +3,64 @@ license: mit
 ---
 # Collected model training processes (continuously updated)
 
-(introductory line not captured in this view)
+This repository organizes models with a flat directory structure plus a tag system, described below:
 
-- **Top-level directories**: represent the different model categories.
-- **Second-level directories**: under each category, one directory per concrete model name.
-- **Third-level directories**: under each model name, three parts:
-  - `code`: the code and training scripts for the model (now also records the training run in three kinds of `.log` files)
-  - `model`: the collected training processes (first-level subdirectories: one per dataset; second-level subdirectories: training variants, where subdirectory 0 stores the normal training run, 1 the data-augmentation run, and 2 the backdoor-attack run; third-level subdirectories: the per-epoch `.pth` weight files, the `.npy` embeddings collected during training, and an `index.json` file listing the dataset indices the embeddings correspond to).
-  - `dataset`: the datasets used for training, either as an extracted folder or as an archive `dataset.zip` (may contain several datasets; switch between them in the code).
+## Repository structure
+
+- **Top-level directories**: named directly after the model, e.g. `Clone-detection-BigCloneBench`, `GraphMAE_QM9`
+- **Per-model layout**: each model directory contains:
+  - `code/`: the model's code and training scripts
+  - `model/`: the training process artifacts and weight files
+    - organized by dataset
+    - training variants (0: standard training, 1: data augmentation, 2: backdoor attack)
+    - per-epoch weight files (.pth) and embeddings (.npy)
+  - `dataset/`: the training datasets (extracted or as an archive)
+
+## Tag system
+
+Each model carries the following tag attributes:
+
+1. **Data type** (data_type)
+   - Code (code)
+   - Text (text)
+   - Image (image)
+   - Graph (graph)
+
+2. **Task type** (task_type)
+   - Classification (classification)
+   - Generation (generation)
+   - Retrieval (retrieval)
+   - Similarity (similarity)
+   - Representation learning (representation_learning)
+   - Autoencoding (autoencoder)
+   - Code completion (completion)
+   - Pretraining (pretraining)
+
+3. **Domain** (domain)
+   - Code clone detection (code_clone_detection)
+   - Code search (code_search)
+   - Molecular property prediction (molecular_property)
+   - Code defect detection (code_defect_detection)
+   - Computer vision (computer_vision)
+   - Mobile computing (mobile_computing)
+   - Transformer architecture (transformer)
+
+4. **Input/output type** (input_type/output_type)
+   - Code (code)
+   - Code pair (code_pair)
+   - Code token sequence (code_tokens)
+   - Code ranking (code_ranking)
+   - Natural language (natural_language)
+   - Graph (graph)
+   - Image (image)
+   - Binary label (binary)
+   - Class label (class_label)
+   - Molecular features (molecular_features)
+
+All model metadata and tag information is stored in `models.json`.
+
+Run `python model_filter.py` to search and filter models quickly by tag.
 
 The table below summarizes all collected model training processes:
 <table>
count.py
ADDED
import numpy as np

# replace 'your_file_path.npy' below with the actual path to your .npy file
file_path = '/home/ruofei/RRF/hf-mirror/ttvnet/Image/AlexNet/model/0/epoch_5/train_data.npy'

# load the .npy file
data = np.load(file_path)

# print the array's dimensions
print("Array shape:", data.shape)
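count.py only reports the embedding array's shape. Since train_utils.py writes an index.json next to each train_data.npy, a small companion check could confirm the two stay aligned. This is an illustrative sketch, not part of the commit; the path is an example, and the one-index-per-embedding-row relationship is an assumption suggested by the save logic above.

import json
import numpy as np

epoch_dir = 'Image/AlexNet/model/0/epoch_5'  # example path; adjust to your checkout

embeddings = np.load(f'{epoch_dir}/train_data.npy')
with open(f'{epoch_dir}/index.json') as f:
    indices = json.load(f)

print("embeddings:", embeddings.shape, "indices:", len(indices))
# assumption: one dataset index per embedding row
assert embeddings.shape[0] == len(indices)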
model_filter.py
ADDED
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
from typing import Dict, List, Set

class ModelFilter:
    def __init__(self, json_path: str = "models.json"):
        with open(json_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)
        self.models = self.data['models']

        # collect all available tags
        self.all_tags = {
            'data_type': set(),
            'task_type': set(),
            'domain': set(),
            'input_type': set(),
            'output_type': set()
        }

        # display names for the tag types
        self.tag_names = {
            'data_type': 'data type',
            'task_type': 'task type',
            'domain': 'domain',
            'input_type': 'input type',
            'output_type': 'output type'
        }

        for model in self.models:
            tags = model['tags']
            for tag_type in self.all_tags:
                if tag_type in tags:
                    if isinstance(tags[tag_type], list):
                        self.all_tags[tag_type].update(tags[tag_type])
                    else:
                        self.all_tags[tag_type].add(tags[tag_type])

    def print_tag_type_options(self):
        """Print the selectable tag types."""
        print("\nSelectable tag types:")
        for i, (tag_type, name) in enumerate(self.tag_names.items(), 1):
            print(f"{i}. {name} ({tag_type})")

    def print_tag_options(self, tag_type: str):
        """Print the available options for a given tag type."""
        print(f"\n=== {self.tag_names[tag_type]} ===")
        for i, tag in enumerate(sorted(self.all_tags[tag_type]), 1):
            print(f"{i}. {tag}")

    def filter_models(self, filters: Dict[str, Set[str]]) -> List[Dict]:
        """Filter models by the given criteria."""
        filtered_models = []

        for model in self.models:
            match = True
            for tag_type, filter_values in filters.items():
                if not filter_values:  # skip if no filter is set for this tag type
                    continue

                model_tags = model['tags'].get(tag_type, [])
                if isinstance(model_tags, str):
                    model_tags = [model_tags]

                # check for an intersection
                if not set(model_tags) & filter_values:
                    match = False
                    break

            if match:
                filtered_models.append(model)

        return filtered_models

    def print_models(self, models: List[Dict]):
        """Print model information."""
        if not models:
            print("\nNo matching models found.")
            return

        print(f"\nFound {len(models)} matching models:")
        for i, model in enumerate(models, 1):
            print(f"\n{i}. {model['name']}")
            print(f"   Description: {model['description']}")
            print(f"   Dataset: {model['dataset']}")
            print(f"   Tags:")
            for tag_type, tags in model['tags'].items():
                if isinstance(tags, list):
                    print(f"     - {self.tag_names[tag_type]}: {', '.join(tags)}")
                else:
                    print(f"     - {self.tag_names[tag_type]}: {tags}")

def get_user_input(prompt: str, valid_options: Set[str]) -> Set[str]:
    """Ask the user to pick tags."""
    print(f"\n{prompt}")
    print("Enter tag numbers (separate several tags with spaces, press Enter to skip):")
    while True:
        try:
            user_input = input().strip()
            if not user_input:
                return set()

            indices = [int(x) - 1 for x in user_input.split()]
            selected = set()
            sorted_options = sorted(valid_options)
            for idx in indices:
                if 0 <= idx < len(sorted_options):
                    selected.add(sorted_options[idx])
                else:
                    print(f"Invalid option number: {idx + 1}")
                    continue
            return selected
        except ValueError:
            print("Please enter valid numbers.")

def get_tag_type_choice() -> str:
    """Ask the user to pick a tag type."""
    tag_types = list(ModelFilter().tag_names.keys())
    while True:
        try:
            choice = input("\nChoose a tag type number (press Enter to start filtering): ").strip()
            if not choice:
                return ""

            idx = int(choice) - 1
            if 0 <= idx < len(tag_types):
                return tag_types[idx]
            else:
                print("Invalid option number, please try again.")
        except ValueError:
            print("Please enter a valid number.")

def main():
    print("Welcome to the model filtering tool!")
    model_filter = ModelFilter()
    filters = {}
    while True:
        # show the tag type options
        model_filter.print_tag_type_options()

        # ask which tag type to filter on
        tag_type = get_tag_type_choice()
        if not tag_type:
            break

        # show the available options for the chosen tag type
        model_filter.print_tag_options(tag_type)

        # ask which tags to select
        selected = get_user_input(
            f"Select {model_filter.tag_names[tag_type]} tags",
            model_filter.all_tags[tag_type]
        )

        if selected:
            filters[tag_type] = selected

    # filter and display the results
    filtered_models = model_filter.filter_models(filters)
    model_filter.print_models(filtered_models)
    # ask whether to continue
    if input("\nContinue filtering? (y/n): ").lower() == 'y':
        main()
    else:
        print("\nThanks for using the tool. Goodbye!")

if __name__ == "__main__":
    main()
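model_filter.py is interactive, but the same ModelFilter class can also be used non-interactively, which is handy in scripts. A short usage example based on the class as added above (tag values taken from models.json below): list image models tagged for mobile computing.

from model_filter import ModelFilter

mf = ModelFilter("models.json")
results = mf.filter_models({
    "data_type": {"image"},
    "domain": {"mobile_computing"},
})
mf.print_models(results)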
models.json
ADDED
{
  "models": [
    {"name": "Clone-detection-BigCloneBench",
     "tags": {"data_type": ["code"], "task_type": ["classification", "similarity"], "domain": ["code_clone_detection"], "input_type": "code_pair", "output_type": "binary"},
     "original_path": "Code-Code/Clone-detection-BigCloneBench",
     "description": "Code clone detection model trained on the large-scale BigCloneBench benchmark; the task is binary classification (0/1), where 1 means semantically equivalent and 0 otherwise.",
     "dataset": "BigCloneBench dataset", "epoch": "to be uploaded"},
    {"name": "Clone-detection-POJ-104",
     "tags": {"data_type": ["code"], "task_type": ["retrieval", "similarity"], "domain": ["code_clone_detection"], "input_type": "code", "output_type": "code_ranking"},
     "original_path": "Code-Code/Clone-detection-POJ-104",
     "description": "Code clone detection model based on the POJ-104 dataset; given a piece of code and a set of candidates, the task is to return the Top-K candidates with the same semantics.",
     "dataset": "POJ-104 programming problems dataset", "epoch": "to be uploaded"},
    {"name": "CodeCompletion-token",
     "tags": {"data_type": ["code"], "task_type": ["generation", "completion"], "domain": ["code_completion"], "input_type": "code_tokens", "output_type": "code_tokens"},
     "original_path": "Code-Code/CodeCompletion-token",
     "description": "Token-level code auto-completion model.",
     "dataset": "Java code token sequence dataset", "epoch": "to be uploaded"},
    {"name": "Defect-detection",
     "tags": {"data_type": ["code"], "task_type": ["classification"], "domain": ["code_defect_detection"], "input_type": "code", "output_type": "binary"},
     "original_path": "Code-Code/Defect-detection",
     "description": "Code defect detection model that analyzes code to identify potential defects and bugs (binary classification, 0/1).",
     "dataset": "C-language code dataset with defect annotations", "epoch": "to be uploaded"},
    {"name": "code-refinement",
     "tags": {"data_type": ["code"], "task_type": ["generation", "optimization"], "domain": ["code_optimization"], "input_type": "code", "output_type": "code"},
     "original_path": "Code-Code/code-refinement",
     "description": "Code refinement model.",
     "dataset": "Dataset of code pairs before and after refinement (C language)", "epoch": "to be uploaded"},
    {"name": "code-to-text",
     "tags": {"data_type": ["code", "text"], "task_type": ["generation", "translation"], "domain": ["code_documentation"], "input_type": "code", "output_type": "text"},
     "original_path": "Code-Text/code-to-text",
     "description": "Code-to-natural-language translation model.",
     "dataset": "Multilingual code-text pair dataset", "epoch": "to be uploaded"},
    {"name": "NL-code-search-Adv",
     "tags": {"data_type": ["code", "text"], "task_type": ["retrieval", "search"], "domain": ["code_search"], "input_type": "text", "output_type": "code"},
     "original_path": "Text-code/NL-code-search-Adv",
     "description": "Advanced natural-language code search model that performs code search by computing the similarity between a natural-language query and code snippets.",
     "dataset": "Natural language-(Python) code pair dataset", "epoch": "to be uploaded"},
    {"name": "NL-code-search-WebQuery",
     "tags": {"data_type": ["code", "text"], "task_type": ["retrieval", "search"], "domain": ["code_search"], "input_type": "text", "output_type": "code"},
     "original_path": "Text-code/NL-code-search-WebQuery",
     "description": "Web-query-based code search model; an encoder processes the code and natural-language input, and a multilayer perceptron (MLP) computes the similarity score.",
     "dataset": "Web query-code pair dataset (CodeSearchNet and CoSQA, Python)", "epoch": "to be uploaded"},
    {"name": "text-to-code",
     "tags": {"data_type": ["code", "text"], "task_type": ["generation"], "domain": ["code_generation"], "input_type": "text", "output_type": "code"},
     "original_path": "Text-code/text-to-code",
     "description": "Natural-language-to-code generation model.",
     "dataset": "Text description-code (C language) pair dataset", "epoch": "to be uploaded"},
    {"name": "GraphMAE_QM9",
     "tags": {"data_type": ["graph"], "task_type": ["representation_learning", "autoencoder"], "domain": ["molecular_property"], "input_type": "molecular_graph", "output_type": "graph_embedding"},
     "original_path": "Graph/GraphMAE_QM9",
     "description": "Graph masked autoencoder trained on QM9; self-supervised training by predicting the coordinates and types of atoms in molecular graphs.",
     "dataset": "Molecular property prediction dataset", "epoch": "to be uploaded"},
    {"name": "AlexNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/AlexNet",
     "description": "Classic model that won ImageNet in 2012 and first demonstrated the power of deep learning for image recognition.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "DenseNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/DenseNet",
     "description": "Every layer connects directly to all other layers, stacking like building blocks to learn image features better.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "EfficientNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/EfficientNet",
     "description": "Balances network depth, width, and image resolution to achieve better results with less computation.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "GoogLeNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/GoogLeNet",
     "description": "Model developed by Google; like several eyes looking at different parts of the image at once, both resource-efficient and accurate.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "LeNet5",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/LeNet5",
     "description": "A pioneering work of deep learning; simple, but it laid down the basic architecture of modern CNNs.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "MobileNetv1",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "mobile_computing"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/MobileNetv1",
     "description": "Lightweight model designed for mobile phones, using special convolutions to reduce computation.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "MobileNetv2",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "mobile_computing"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/MobileNetv2",
     "description": "Upgraded MobileNet with a feature-reuse mechanism and better performance.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "MobileNetv3",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "mobile_computing"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/MobileNetv3",
     "description": "Newer version that uses automated architecture search to find the network structure best suited to mobile devices.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "ResNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/ResNet",
     "description": "Uses shortcut connections to solve the difficulty of training deep networks, enabling extremely deep models.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "SENet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/SENet",
     "description": "Adds an attention mechanism so the model can focus on the important parts of an image.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "ShuffleNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "mobile_computing"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/ShuffleNet",
     "description": "Runs efficiently on phones through clever channel shuffling and grouped computation.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "ShuffleNetv2",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "mobile_computing"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/ShuffleNetv2",
     "description": "Optimized redesign of the original, faster and more accurate.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "SwinTransformer",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "transformer"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/SwinTransformer",
     "description": "Brings advanced NLP techniques to images, understanding them by progressively attending to different regions of the picture.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "VGG",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/VGG",
     "description": "Stacks uniform small convolution kernels into a deep network; simple structure, strong results.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "ViT",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision", "transformer"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/ViT",
     "description": "Splits an image into patches and processes them like reading a text, a completely new way of handling images.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"},
    {"name": "ZFNet",
     "tags": {"data_type": ["image"], "task_type": ["classification"], "domain": ["computer_vision"], "input_type": "image", "output_type": "class_label"},
     "original_path": "Image/ZFNet",
     "description": "An AlexNet improved through visualization studies, helping people understand how the network sees images.",
     "dataset": "CIFAR-10 dataset", "epoch": "to be added"}
  ]
}