Scotths committed
Commit a6b757d · 1 Parent(s): 9e0449a

adding app

Files changed (3)
  1. app.py +33 -0
  2. config.json +34 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,33 @@
+ import datasets
+ import gradio as gr
+ import torch
+ from transformers import AutoFeatureExtractor, AutoModelForImageClassification
+
+ description = 'Upload a picture of your bean plant to determine if it is healthy or diseased'
+ title = 'Bean Plant Disease Identifier'
+
+ dataset = datasets.load_dataset('beans')
+
+ # Load the fine-tuned model and feature extractor from the local saved_model_files directory.
+ feature_extractor = AutoFeatureExtractor.from_pretrained('saved_model_files')
+ model = AutoModelForImageClassification.from_pretrained('saved_model_files')
+
+ labels = dataset['train'].features['labels'].names
+
+ def classify(im):
+     features = feature_extractor(im, return_tensors='pt')
+     logits = model(features['pixel_values']).logits
+     probability = torch.nn.functional.softmax(logits, dim=-1)
+     probs = probability[0].detach().numpy()
+     confidences = {label: float(probs[i]) for i, label in enumerate(labels)}
+     return confidences
+
+ interface = gr.Interface(
+     classify,
+     inputs='image',
+     outputs='label',
+     title=title,
+     description=description
+ )
+
+ interface.launch(debug=True)
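
A quick way to sanity-check the classifier outside Gradio (a minimal sketch; like app.py it assumes the fine-tuned weights live in a local saved_model_files/ directory):

    import datasets
    import torch
    from transformers import AutoFeatureExtractor, AutoModelForImageClassification

    # Pull one test image from the beans dataset and classify it directly.
    image = datasets.load_dataset('beans', split='test')[0]['image']
    feature_extractor = AutoFeatureExtractor.from_pretrained('saved_model_files')  # assumed local path
    model = AutoModelForImageClassification.from_pretrained('saved_model_files')   # assumed local path

    with torch.no_grad():
        logits = model(**feature_extractor(image, return_tensors='pt')).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. 'healthy'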
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "angular_leaf_spot",
+     "1": "bean_rust",
+     "2": "healthy"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "angular_leaf_spot": 0,
+     "bean_rust": 1,
+     "healthy": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.1"
+ }
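
The id2label and label2id maps above are what tie the model's three output indices to readable class names; a minimal sketch of that lookup (again assuming the local saved_model_files/ directory):

    from transformers import AutoConfig

    config = AutoConfig.from_pretrained('saved_model_files')  # assumed local path
    print(config.id2label)  # {0: 'angular_leaf_spot', 1: 'bean_rust', 2: 'healthy'}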
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ datasets
+ gradio
+ torch
+ transformers
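
With these dependencies installed (for example via pip install -r requirements.txt), running python app.py launches the Gradio interface locally; on a Hugging Face Space, app.py is started automatically.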