pyesonekyaw committed on
Commit b2f544f · verified · 1 Parent(s): 4eb017d

Upload 13 files
.gitattributes CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/bubul.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/hornbill.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/lizard.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/monkey.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/otter.jpg filter=lfs diff=lfs merge=lfs -text
+ photo_lookup.json filter=lfs diff=lfs merge=lfs -text
+ species_lookup.json filter=lfs diff=lfs merge=lfs -text
+ txt_emb_species.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,14 +1,12 @@
  ---
  title: Biome
  emoji: 👀
- colorFrom: indigo
- colorTo: indigo
+ colorFrom: green
+ colorTo: green
  sdk: gradio
  sdk_version: 5.8.0
  app_file: app.py
  pinned: false
  license: mit
  short_description: Multimodal search & retrieval-based biodiversity recognition
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,466 @@
+ import collections
+ import heapq
+ import io
+ import json
+ import logging
+ import os
+ from pathlib import Path
+
+ import faiss
+ import gradio as gr
+ import numpy as np
+ import requests
+ import torch
+ import torch.nn.functional as F
+ from huggingface_hub import hf_hub_download
+ from open_clip import create_model, get_tokenizer
+ from PIL import Image
+ from torchvision import transforms
+
+ log_format = "[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s"
+ logging.basicConfig(level=logging.INFO, format=log_format)
+ logger = logging.getLogger()
+
+ hf_token = os.getenv("HF_TOKEN")
+
+ model_str = "hf-hub:imageomics/bioclip"
+ tokenizer_str = "ViT-B-16"
+
+ txt_emb_npy = hf_hub_download(repo_id="pyesonekyaw/biome_lfs", filename="txt_emb_species.npy", repo_type="dataset")
+ txt_names_json = "txt_emb_species.json"
+
+ k = 5  # number of top predictions shown in the UI
+
+ ranks = ("Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species")
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ # Standard CLIP preprocessing: 224x224 input, CLIP mean/std normalization
+ preprocess_img = transforms.Compose(
+     [
+         transforms.ToTensor(),
+         transforms.Resize((224, 224), antialias=True),
+         transforms.Normalize(
+             mean=(0.48145466, 0.4578275, 0.40821073),
+             std=(0.26862954, 0.26130258, 0.27577711),
+         ),
+     ]
+ )
+
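A quick sanity check of the transform above (a hypothetical snippet; the example path is one of the images added in this commit):

    from PIL import Image
    t = preprocess_img(Image.open("examples/otter.jpg").convert("RGB"))
    assert t.shape == (3, 224, 224)  # CHW tensor, CLIP-normalized
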
+ MIN_PROB = 1e-9              # ignore classes below this zero-shot probability
+ TOP_K_CANDIDATES = 250       # zero-shot candidates considered before retrieval boosting
+ TOP_N_SIMILAR = 22           # neighbours to retrieve from the FAISS index
+ SIMILARITY_BOOST = 0.2       # score added per retrieval vote
+ VOTE_THRESHOLD = 3           # votes needed for a species outside the zero-shot candidates
+ SIMILARITY_THRESHOLD = 0.99  # skip near-exact matches (likely the query image itself)
+
+ # Paths for the retrieval (RAG) lookup tables
+ PHOTO_LOOKUP_PATH = "./photo_lookup.json"
+ SPECIES_LOOKUP_PATH = "./species_lookup.json"
+
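The retrieval boost used below is additive: each vote from a similar image adds SIMILARITY_BOOST to a matching candidate's score before renormalization, and a species absent from the zero-shot candidates needs at least VOTE_THRESHOLD votes to enter at all. With hypothetical numbers:

    zero_shot = 0.40                            # zero-shot probability of a candidate
    boosted = zero_shot + SIMILARITY_BOOST * 3  # 3 votes: 0.40 + 0.2 * 3 = 1.00 (pre-normalization)
    entry = SIMILARITY_BOOST * VOTE_THRESHOLD   # an unseen species enters at 0.2 * 3 = 0.60
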
+ theme = gr.themes.Base(
+     primary_hue=gr.themes.colors.teal,
+     secondary_hue=gr.themes.colors.blue,
+     neutral_hue=gr.themes.colors.gray,
+     text_size=gr.themes.sizes.text_lg,
+ ).set(
+     button_primary_background_fill="#114A56",
+     button_primary_background_fill_hover="#114A56",
+     block_title_text_weight="600",
+     block_label_text_weight="600",
+     block_label_text_size="*text_md",
+ )
+
+ EXAMPLES_DIR = Path("examples")
+ example_images = sorted(str(p) for p in EXAMPLES_DIR.glob("*.jpg"))
+
+ def indexed(lst, indices):
+     return [lst[i] for i in indices]
+
+ def format_name(taxon, common):
+     taxon = " ".join(taxon)
+     if not common:
+         return taxon
+     return f"{taxon} ({common})"
+
+ def combine_duplicate_predictions(predictions):
+     """Combine predictions where one name is contained within another."""
+     combined = {}
+     used = set()
+
+     # Sort by length of name (longer names first) and probability
+     items = sorted(predictions.items(), key=lambda x: (-len(x[0]), -x[1]))
+
+     for name1, prob1 in items:
+         if name1 in used:
+             continue
+
+         total_prob = prob1
+         used.add(name1)
+         name1_lower = name1.lower()
+
+         # Fold in any remaining prediction whose name contains,
+         # or is contained in, this one (case-insensitive)
+         for name2, prob2 in predictions.items():
+             if name2 in used:
+                 continue
+             name2_lower = name2.lower()
+             if name1_lower in name2_lower or name2_lower in name1_lower:
+                 total_prob += prob2
+                 used.add(name2)
+
+         combined[name1] = total_prob
+
+     # Normalize probabilities so they sum to 1
+     total = sum(combined.values())
+     return {k: v / total for k, v in combined.items()}
+
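A quick illustration of the merge with hypothetical values (the shorter name is contained in the longer one, so their probabilities combine and renormalize):

    preds = {
        "Lutrogale perspicillata (smooth-coated otter)": 0.5,
        "lutrogale perspicillata": 0.3,
        "Aonyx cinereus (Asian small-clawed otter)": 0.2,
    }
    merged = combine_duplicate_predictions(preds)
    # {"Lutrogale perspicillata (smooth-coated otter)": 0.8,
    #  "Aonyx cinereus (Asian small-clawed otter)": 0.2}
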
+ @torch.no_grad()
+ def open_domain_classification(img, rank: int, return_all=False):
+     """
+     Predicts from the entire tree of life using a retrieval-augmented (RAG) approach.
+     """
+     logger.info(f"Starting open domain classification for rank: {rank}")
+     img = preprocess_img(img).to(device)
+     img_features = model.encode_image(img.unsqueeze(0))
+     img_features = F.normalize(img_features, dim=-1)
+
+     # Get zero-shot predictions
+     logits = (model.logit_scale.exp() * img_features @ txt_emb).squeeze()
+     probs = F.softmax(logits, dim=0)
+
+     # Get similar images' votes and metadata
+     species_votes, similar_images = get_similar_images_metadata(img_features, faiss_index, id_mapping, name_mapping)
+
+     if rank + 1 == len(ranks):
+         # Species-level prediction
+         topk = probs.topk(TOP_K_CANDIDATES)
+         predictions = {
+             format_name(*txt_names[i]): prob.item()
+             for i, prob in zip(topk.indices, topk.values)
+         }
+
+         # Augment predictions with votes
+         augmented_predictions = predictions.copy()
+         for pred_name in predictions:
+             pred_name_lower = pred_name.lower()
+             for voted_species, vote_count in species_votes.items():
+                 if voted_species in pred_name_lower or pred_name_lower in voted_species:
+                     augmented_predictions[pred_name] += SIMILARITY_BOOST * vote_count
+                 elif vote_count >= VOTE_THRESHOLD:
+                     augmented_predictions[voted_species] = vote_count * SIMILARITY_BOOST
+
+         # Sort predictions
+         sorted_predictions = dict(sorted(
+             augmented_predictions.items(),
+             key=lambda x: x[1],
+             reverse=True
+         )[:k])
+
+         # Normalize and combine duplicates
+         total = sum(sorted_predictions.values())
+         sorted_predictions = {k: v / total for k, v in sorted_predictions.items()}
+         sorted_predictions = combine_duplicate_predictions(sorted_predictions)
+
+         logger.info(f"Top K predictions after combining duplicates: {sorted_predictions}")
+         return sorted_predictions, similar_images
+
+     # Higher-rank prediction
+     output = collections.defaultdict(float)
+     for i in torch.nonzero(probs > MIN_PROB).flatten():
+         output[" ".join(txt_names[i][0][: rank + 1])] += probs[i].item()
+
+     # Incorporate votes for higher ranks
+     for species, vote_count in species_votes.items():
+         try:
+             # Find matching taxonomy in txt_names
+             for taxonomy, _ in txt_names:
+                 if species in " ".join(taxonomy).lower():
+                     higher_rank = " ".join(taxonomy[: rank + 1])
+                     output[higher_rank] += SIMILARITY_BOOST * vote_count
+                     break
+         except Exception as e:
+             logger.error(f"Error processing vote for species {species}: {e}")
+
+     # Get top-k predictions and normalize
+     topk_names = heapq.nlargest(k, output, key=output.get)
+     prediction_dict = {name: output[name] for name in topk_names}
+
+     # Normalize probabilities to sum to 1
+     total = sum(prediction_dict.values())
+     prediction_dict = {k: v / total for k, v in prediction_dict.items()}
+     prediction_dict = combine_duplicate_predictions(prediction_dict)
+
+     logger.info(f"Prediction dictionary after combining duplicates: {prediction_dict}")
+
+     return prediction_dict, similar_images
+
+
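For orientation, a minimal standalone call of the function above (assumes the globals set up under __main__ below; rank = len(ranks) - 1 selects the species-level branch):

    from PIL import Image
    img = Image.open("examples/otter.jpg").convert("RGB")
    preds, similar = open_domain_classification(img, rank=len(ranks) - 1)
    # preds: {formatted name -> normalized score}; similar: up to 5 neighbour dicts
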
+ def change_output(choice):
+     return gr.Label(num_top_classes=k, label=ranks[choice], show_label=True, value=None)
+
+ def get_cache_paths(name="demo"):
+     """Get paths for the cached FAISS index and ID mapping."""
+     return {
+         'index': hf_hub_download(repo_id="pyesonekyaw/biome_lfs", filename='cache/faiss_cache_demo.index', repo_type="dataset"),
+         'mapping': hf_hub_download(repo_id="pyesonekyaw/biome_lfs", filename='cache/faiss_cache_demo_mapping.json', repo_type="dataset"),
+     }
+
+ def build_name_mapping(txt_names):
+     """Build a two-way mapping between scientific names and common names."""
+     name_mapping = {}
+     for taxonomy, common_name in txt_names:
+         if not common_name:
+             continue
+         if len(taxonomy) >= 2:
+             scientific_name = f"{taxonomy[-2]} {taxonomy[-1]}".lower()
+             common_name = common_name.lower()
+             name_mapping[scientific_name] = (scientific_name, common_name)
+             name_mapping[common_name] = (scientific_name, common_name)
+     return name_mapping
+
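The mapping is keyed in both directions, so a retrieved label can be canonicalized whether the lookup table stores the scientific or the common name (hypothetical entry):

    names = [(["Animalia", "Chordata", "Mammalia", "Carnivora",
               "Mustelidae", "Lutrogale", "perspicillata"], "Smooth-coated Otter")]
    m = build_name_mapping(names)
    # m["lutrogale perspicillata"] == ("lutrogale perspicillata", "smooth-coated otter")
    # m["smooth-coated otter"]     == ("lutrogale perspicillata", "smooth-coated otter")
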
+ def load_faiss_index():
+     """Load the FAISS index and ID mapping from the dataset cache."""
+     cache_paths = get_cache_paths()
+     logger.info("Loading FAISS index from cache...")
+     index = faiss.read_index(cache_paths['index'])
+     with open(cache_paths['mapping'], 'r') as f:
+         id_mapping = json.load(f)
+     return index, id_mapping
+
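The cached index itself is built offline and is not part of this commit. Since the search code below treats FAISS distances as similarities over L2-normalized embeddings, a compatible index would be an inner-product index, roughly (a sketch; the file names and embedding array are assumptions):

    import faiss
    import numpy as np

    embs = np.load("image_embeddings.npy").astype("float32")  # hypothetical (N, 512) array
    faiss.normalize_L2(embs)                  # unit vectors: inner product == cosine similarity
    index = faiss.IndexFlatIP(embs.shape[1])  # exact inner-product search
    index.add(embs)
    faiss.write_index(index, "faiss_cache_demo.index")
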
+ def get_similar_images_metadata(img_embedding, faiss_index, id_mapping, name_mapping):
+     """Get metadata for similar images using FAISS search."""
+     img_embedding_np = img_embedding.cpu().numpy()
+     if img_embedding_np.ndim == 1:
+         img_embedding_np = img_embedding_np.reshape(1, -1)
+
+     # Search for more images than needed to account for filtered matches
+     distances, indices = faiss_index.search(img_embedding_np, TOP_N_SIMILAR * 2)
+
+     # Filter out near-exact matches
+     valid_indices = []
+     valid_distances = []
+     valid_count = 0
+
+     for dist, idx in zip(distances[0], indices[0]):
+         # For an inner-product index, the returned distance is already the similarity
+         similarity = dist
+         if similarity > SIMILARITY_THRESHOLD:
+             continue
+
+         valid_indices.append(idx)
+         valid_distances.append(similarity)
+         valid_count += 1
+
+         if valid_count >= TOP_N_SIMILAR:
+             break
+
+     species_votes = {}
+     similar_images = []
+
+     for idx, similarity in zip(valid_indices[:5], valid_distances[:5]):  # Only process top 5 for display
+         similar_img_id = id_mapping[idx]
+
+         try:
+             species_names = id_to_species_info.get(similar_img_id, [])
+             species_names = [name for name in species_names if name]
+
+             # Map each name to its canonical scientific name where possible
+             processed_names = set()
+             for species in species_names:
+                 name_tuple = name_mapping.get(species)
+                 if name_tuple:
+                     processed_names.add(name_tuple[0])
+                 else:
+                     processed_names.add(species)
+
+             for species in processed_names:
+                 species_votes[species] = species_votes.get(species, 0) + 1
+
+             # Store similar image info for display
+             similar_images.append({
+                 'id': similar_img_id,
+                 'species': next(iter(processed_names)) if processed_names else 'Unknown',
+                 'common_name': species_names[-1] if species_names else '',
+                 'similarity': similarity,
+             })
+
+         except Exception as e:
+             logger.error(f"Error processing JSON for image {similar_img_id}: {e}")
+             continue
+
+     return species_votes, similar_images
+
+
+ if __name__ == "__main__":
+     logger.info("Starting.")
+     model = create_model(model_str, output_dict=True, require_pretrained=True)
+     model = model.to(device)
+     logger.info("Created model.")
+
+     model = torch.compile(model)
+     logger.info("Compiled model.")
+
+     tokenizer = get_tokenizer(tokenizer_str)
+
+     with open(PHOTO_LOOKUP_PATH) as f:
+         id_to_photo_url = json.load(f)
+     with open(SPECIES_LOOKUP_PATH) as f:
+         id_to_species_info = json.load(f)
+     logger.info(f"Loaded {len(id_to_photo_url)} photo mappings")
+     logger.info(f"Loaded {len(id_to_species_info)} species mappings")
+
+     # Load text embeddings and names for the zero-shot classifier
+     txt_emb = torch.from_numpy(np.load(txt_emb_npy, mmap_mode="r")).to(device)
+     with open(txt_names_json) as fd:
+         txt_names = json.load(fd)
+
+     # Build name mapping
+     name_mapping = build_name_mapping(txt_names)
+
+     # Load the cached FAISS index and ID mapping
+     faiss_index, id_mapping = load_faiss_index()
+
+     # Define process_output before wiring it into the UI below
+     def process_output(img, rank):
+         predictions, similar_imgs = open_domain_classification(img, rank)
+
+         logger.info(f"Number of similar images found: {len(similar_imgs)}")
+
+         images = []
+         labels = []
+
+         for img_info in similar_imgs:
+             img_id = img_info['id']
+             img_url = id_to_photo_url.get(img_id)
+             if not img_url:
+                 images.append(None)
+                 labels.append("")
+                 continue
+             img_url = img_url.replace("square", "small")  # request a larger thumbnail
+             logger.info(f"Processing image URL: {img_url}")
+
+             try:
+                 # Try fetching the thumbnail from the URL
+                 response = requests.get(img_url, timeout=10)
+                 if response.status_code == 200:
+                     try:
+                         thumb = Image.open(io.BytesIO(response.content))
+                         images.append(thumb)
+                     except Exception as e:
+                         logger.info(f"Failed to load image from URL: {e}")
+                         images.append(None)
+                 else:
+                     logger.info(f"Failed to fetch image from URL: {response}")
+                     images.append(None)
+
+                 # Add label regardless of image load success
+                 label = f"**{img_info['species']}**"
+                 if img_info['common_name']:
+                     label += f" ({img_info['common_name']})"
+                 label += f"\nSimilarity: {img_info['similarity']:.3f}"
+                 label += f"\n[View on iNaturalist](https://www.inaturalist.org/observations/{img_id})"
+                 labels.append(label)
+
+             except Exception as e:
+                 logger.error(f"Error processing image {img_id}: {e}")
+                 images.append(None)
+                 labels.append("")
+
+         # Pad to a fixed length of 5 image/label slots
+         images += [None] * (5 - len(images))
+         labels += [""] * (5 - len(labels))
+
+         logger.info(f"Final number of images: {len(images)}")
+         logger.info(f"Final number of labels: {len(labels)}")
+
+         # 1 prediction dict + 5 images + 5 markdown labels = 11 outputs,
+         # matching the click() wiring below
+         return [predictions] + images + labels
+
+     with gr.Blocks(theme=theme) as app:
+         # Header
+         with gr.Row(variant="panel"):
+             with gr.Column(scale=1):
+                 gr.Image("image.jpg", elem_id="logo-img", show_label=False)
+             with gr.Column(scale=30):
+                 gr.Markdown("""Biome is a vision foundation model-powered tool customized to identify Singapore's local biodiversity.
+                 <br/> <br/>
+                 **Developed by**: Pye Sone Kyaw - AI Engineer @ Multimodal AI Team - AI Practice - GovTech SG
+                 <br/> <br/>
+                 Under the hood, Biome uses [BioCLIP](https://github.com/Imageomics/BioCLIP) augmented with multimodal search and retrieval to enhance its Singapore-specific biodiversity classification capabilities.
+                 """)
+
+         with gr.Row(variant="panel", elem_id="images_panel"):
+             img_input = gr.Image(
+                 height=400,
+                 sources=["upload"],
+                 type="pil"
+             )
+
+         with gr.Row():
+             with gr.Column():
+                 with gr.Row():
+                     gr.Examples(
+                         examples=example_images,
+                         inputs=img_input,
+                         label="Example Images"
+                     )
+                 rank_dropdown = gr.Dropdown(
+                     label="Taxonomic Rank",
+                     info="Which taxonomic rank to predict. Fine-grained ranks (genus, species) are more challenging.",
+                     choices=ranks,
+                     value="Species",
+                     type="index",
+                 )
+                 open_domain_btn = gr.Button("Submit", variant="primary")
+             with gr.Column():
+                 open_domain_output = gr.Label(
+                     num_top_classes=k,
+                     label="Prediction",
+                     show_label=True,
+                     value=None,
+                 )
+
+         # Section for the retrieved nearest-neighbour images
+         with gr.Row(variant="panel"):
+             with gr.Column():
+                 gr.Markdown("### Most Similar Images from Database")
+
+         with gr.Row():
+             similar_images = [
+                 gr.Image(label="Similar Image 1", height=200, show_label=True),
+                 gr.Image(label="Similar Image 2", height=200, show_label=True),
+                 gr.Image(label="Similar Image 3", height=200, show_label=True),
+                 gr.Image(label="Similar Image 4", height=200, show_label=True),
+                 gr.Image(label="Similar Image 5", height=200, show_label=True),
+             ]
+
+         with gr.Row():
+             similar_labels = [
+                 gr.Markdown("Species 1"),
+                 gr.Markdown("Species 2"),
+                 gr.Markdown("Species 3"),
+                 gr.Markdown("Species 4"),
+                 gr.Markdown("Species 5"),
+             ]
+
+         rank_dropdown.change(
+             fn=change_output,
+             inputs=rank_dropdown,
+             outputs=[open_domain_output]
+         )
+
+         open_domain_btn.click(
+             fn=process_output,
+             inputs=[img_input, rank_dropdown],
+             outputs=[open_domain_output] + similar_images + similar_labels,
+         )
+
+         with gr.Row(variant="panel"):
+             gr.Markdown("""
+             **Disclaimer**: This is a proof-of-concept demo for non-commercial purposes. No data is stored or used for any form of training, and all data used for retrieval comes from [iNaturalist](https://inaturalist.org/).
+             The adage of garbage in, garbage out applies here - uploading images that are not biodiversity-related will yield unpredictable results.
+             """)
+
+     app.queue(max_size=20)
+     app.launch(share=False, enable_monitoring=False, allowed_paths=["/app/"])
examples/bubul.jpg ADDED

Git LFS Details

  • SHA256: 603d8ebdd7af5ac7ca27cef32a6b12971801981fbb6f58b3d38dc4d47414449c
  • Pointer size: 132 Bytes
  • Size of remote file: 4.92 MB
examples/hornbill.jpg ADDED

Git LFS Details

  • SHA256: 3b696a13f572cbf122ab06e49e2253bd814ac374ad97ebe2499a231800292d8a
  • Pointer size: 132 Bytes
  • Size of remote file: 2.73 MB
examples/lizard.jpg ADDED

Git LFS Details

  • SHA256: 90716fff17528a25368b768deba331a050db41debe556d2f86fe72a8c5755a88
  • Pointer size: 132 Bytes
  • Size of remote file: 1.85 MB
examples/monkey.jpg ADDED

Git LFS Details

  • SHA256: 8628044ae5aad393d5dd2252bc23c305c81db5dcc2114a7d7aaff138da629ae4
  • Pointer size: 132 Bytes
  • Size of remote file: 1.63 MB
examples/otter.jpg ADDED

Git LFS Details

  • SHA256: 80f37c3dd64b9107a51416ab76505a2d151840f33d6c05bcba04da7a2db7bca3
  • Pointer size: 132 Bytes
  • Size of remote file: 2.01 MB
image.jpg ADDED
photo_lookup.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bb8e6dbc951aa8aea31a950e0dac2e16bdc0fe5bd571f0f7c9fd06c26c8fa71
+ size 39006676
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ open_clip_torch
+ torchvision
+ torch
+ gradio
+ pillow
+ faiss-cpu
+ requests
+ numpy
+ huggingface_hub
species_lookup.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a37171713219bf9ef85a1be7ff6152663dd046574b803032a94dee32272b0574
+ size 45906896
txt_emb_species.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:844e6fabc06cac072214d566b78f40825b154efa9479eb11285030ca038b2ece
+ size 65731052