{
    "imports": [
        "$import glob"
    ],
    "bundle_root": ".",
    "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
    "ckpt_path": "$@bundle_root + '/models/model.pt'",
    "dataset_dir": "/workspace/data",
    "output_dir": "./output",
    "datalist": "$list(sorted(glob.glob(@dataset_dir + '/*.nii*')))",
    "network_def": {
        "_target_": "UNet",
        "spatial_dims": 2,
        "in_channels": 1,
        "out_channels": 4,
        "channels": [
            16,
            32,
            64,
            128,
            256
        ],
        "strides": [
            2,
            2,
            2,
            2
        ],
        "num_res_units": 2
    },
    "network": "$@network_def.to(@device)",
    "preprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "LoadImaged",
                "keys": "image"
            },
            {
                "_target_": "EnsureChannelFirstd",
                "keys": "image"
            },
            {
                "_target_": "ScaleIntensityd",
                "keys": "image"
            },
            {
                "_target_": "EnsureTyped",
                "keys": "image",
                "device": "@device"
            }
        ]
    },
    "dataset": {
        "_target_": "Dataset",
        "data": "$[{'image': i} for i in @datalist]",
        "transform": "@preprocessing"
    },
    "dataloader": {
        "_target_": "DataLoader",
        "dataset": "@dataset",
        "batch_size": 1,
        "shuffle": false,
        "num_workers": 0
    },
    "inferer": {
        "_target_": "SliceInferer",
        "roi_size": [
            256,
            256
        ],
        "spatial_dim": 2
    },
    "postprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "Activationsd",
                "keys": "pred",
                "softmax": true
            },
            {
                "_target_": "Invertd",
                "keys": "pred",
                "transform": "@preprocessing",
                "orig_keys": "image",
                "meta_key_postfix": "meta_dict",
                "nearest_interp": false,
                "to_tensor": true
            },
            {
                "_target_": "AsDiscreted",
                "keys": "pred",
                "argmax": true
            },
            {
                "_target_": "SaveImaged",
                "keys": "pred",
                "meta_keys": "pred_meta_dict",
                "output_dir": "@output_dir"
            }
        ]
    },
    "handlers": [
        {
            "_target_": "CheckpointLoader",
            "load_path": "@ckpt_path",
            "load_dict": {
                "model": "@network"
            }
        }
    ],
    "evaluator": {
        "_target_": "SupervisedEvaluator",
        "device": "@device",
        "val_data_loader": "@dataloader",
        "network": "@network",
        "inferer": "@inferer",
        "postprocessing": "@postprocessing",
        "val_handlers": "@handlers"
    },
    "evaluating": [
        "$@evaluator.run()"
    ]
}
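
Usage note: a minimal sketch of how a MONAI bundle inference config like the one above is typically executed from Python. The file location configs/inference.json and the override value are assumptions for illustration, not paths stated in this config; adjust them (and dataset_dir / bundle_root) to match the local bundle layout.

    from monai.bundle import ConfigParser

    # Load the bundle config (assumed conventional path inside the bundle).
    parser = ConfigParser()
    parser.read_config("configs/inference.json")

    # Entries can be overridden before parsing, e.g. pointing at a local image folder
    # (hypothetical override; the default in the config is /workspace/data).
    parser["dataset_dir"] = "/workspace/data"

    # Resolving "evaluator" instantiates the network, dataset, dataloader, inferer,
    # postprocessing chain and the CheckpointLoader handler defined in the config.
    evaluator = parser.get_parsed_content("evaluator")

    # Equivalent to the "evaluating" expression above: run slice-wise 2D inference
    # over each volume and save the argmax predictions to output_dir.
    evaluator.run()

The same run is typically available from the command line as "python -m monai.bundle run evaluating --config_file configs/inference.json", with --meta_file added when the bundle ships a metadata.json; both forms resolve the @ references and $ expressions in the same way as the ConfigParser.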