{
  "_name_or_path": "google/efficientnet-b4",
  "architectures": [
    "EfficientNetForImageClassification"
  ],
  "batch_norm_eps": 0.001,
  "batch_norm_momentum": 0.99,
  "depth_coefficient": 1.8,
  "depth_divisor": 8,
  "depthwise_padding": [
    6
  ],
  "drop_connect_rate": 0.2,
  "dropout_rate": 0.4,
  "expand_ratios": [
    1,
    6,
    6,
    6,
    6,
    6,
    6
  ],
  "hidden_act": "swish",
  "hidden_dim": 1792,
  "id2label": {
    "0": "ng",
    "1": "ok"
  },
  "image_size": 380,
  "in_channels": [
    32,
    16,
    24,
    40,
    80,
    112,
    192
  ],
  "initializer_range": 0.02,
  "kernel_sizes": [
    3,
    3,
    5,
    3,
    5,
    5,
    3
  ],
  "label2id": {
    "ng": "0",
    "ok": "1"
  },
  "model_type": "efficientnet",
  "num_block_repeats": [
    1,
    2,
    2,
    3,
    3,
    4,
    1
  ],
  "num_channels": 3,
  "num_hidden_layers": 64,
  "out_channels": [
    16,
    24,
    40,
    80,
    112,
    192,
    320
  ],
  "pooling_type": "mean",
  "problem_type": "single_label_classification",
  "squeeze_expansion_ratio": 0.25,
  "strides": [
    1,
    2,
    2,
    2,
    1,
    2,
    1
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.42.4",
  "width_coefficient": 1.4
}
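
This is the Transformers configuration for a fine-tuned EfficientNet-B4 binary image classifier (labels "ng"/"ok", 380x380 inputs). A minimal sketch of loading it with the transformers library, assuming the file above is saved locally as config.json (the path is illustrative):

from transformers import EfficientNetConfig, EfficientNetForImageClassification

# Load the configuration shown above from a local file
# ("config.json" is an assumed path for illustration).
config = EfficientNetConfig.from_json_file("config.json")

# Build a randomly initialised EfficientNet-B4 with the two-class head
# ("ng"/"ok") defined by id2label. Trained weights would normally be
# restored with EfficientNetForImageClassification.from_pretrained(...)
# pointed at the full model repository or directory instead.
model = EfficientNetForImageClassification(config)

print(config.image_size)  # 380
print(config.id2label)    # {0: 'ng', 1: 'ok'}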