Use batch size consistently
#1
by
dennis-rall
- opened
README.md
CHANGED
@@ -74,10 +74,10 @@ output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor
|
|
74 |
# or equivalently (without needing to set num_classes=0)
|
75 |
|
76 |
output = model.forward_features(transforms(img).unsqueeze(0))
|
77 |
-
# output is unpooled, a (1, 257, 1408) shaped tensor
|
78 |
|
79 |
output = model.forward_head(output, pre_logits=True)
|
80 |
-
# output is a (1, num_features) shaped tensor
|
81 |
```
|
82 |
|
83 |
## Model Comparison
|
|
|
74 |
# or equivalently (without needing to set num_classes=0)
|
75 |
|
76 |
output = model.forward_features(transforms(img).unsqueeze(0))
|
77 |
+
# output is unpooled, a (batch_size, 257, 1408) shaped tensor
|
78 |
|
79 |
output = model.forward_head(output, pre_logits=True)
|
80 |
+
# output is a (batch_size, num_features) shaped tensor
|
81 |
```
|
82 |
|
83 |
## Model Comparison
|