{
  "_name_or_path": "facebook/convnext-tiny-224",
  "architectures": [
    "ConvNextForImageClassification"
  ],
  "depths": [
    3,
    3,
    9,
    3
  ],
  "drop_path_rate": 0.0,
  "hidden_act": "gelu",
  "hidden_sizes": [
    96,
    192,
    384,
    768
  ],
  "id2label": {
    "0": "calling",
    "1": "clapping",
    "2": "cycling",
    "3": "dancing",
    "4": "drinking",
    "5": "eating",
    "6": "fighting",
    "7": "hugging",
    "8": "laughing",
    "9": "listening_to_music",
    "10": "running",
    "11": "sitting",
    "12": "sleeping",
    "13": "texting",
    "14": "using_laptop"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "label2id": {
    "calling": 0,
    "clapping": 1,
    "cycling": 2,
    "dancing": 3,
    "drinking": 4,
    "eating": 5,
    "fighting": 6,
    "hugging": 7,
    "laughing": 8,
    "listening_to_music": 9,
    "running": 10,
    "sitting": 11,
    "sleeping": 12,
    "texting": 13,
    "using_laptop": 14
  },
  "layer_norm_eps": 1e-12,
  "layer_scale_init_value": 1e-06,
  "model_type": "convnext",
  "num_channels": 3,
  "num_stages": 4,
  "out_features": [
    "stage4"
  ],
  "out_indices": [
    4
  ],
  "patch_size": 4,
  "problem_type": "single_label_classification",
  "stage_names": [
    "stem",
    "stage1",
    "stage2",
    "stage3",
    "stage4"
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.41.0"
}
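
A minimal sketch of how a checkpoint carrying this config is typically loaded and used for inference with the transformers library. The local checkpoint directory below is an assumption (this config only records that the model was initialized from facebook/convnext-tiny-224, not the fine-tuned repo id), and the image processor is taken from that base model, matching "image_size": 224 above.

# Hedged sketch: load a ConvNeXt action-classification checkpoint described by
# this config and predict one of the 15 labels listed in id2label.
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Assumption: a local directory holding this config.json plus the fine-tuned weights.
checkpoint_dir = "path/to/checkpoint"

# Preprocessing (resize to 224x224, normalization) from the base model named in _name_or_path.
processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = AutoModelForImageClassification.from_pretrained(checkpoint_dir)

image = Image.open("person.jpg").convert("RGB")   # any photo of a human activity
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits                   # shape (1, 15): one score per label
predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])        # e.g. "dancing"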