File size: 4,130 Bytes
1a881d8
 
 
55ece2a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1a881d8
 
 
 
 
 
 
 
 
55ece2a
 
 
 
1a881d8
55ece2a
1a881d8
 
 
 
 
55ece2a
1a881d8
 
 
55ece2a
1a881d8
 
 
55ece2a
 
 
 
 
 
 
 
1a881d8
 
 
55ece2a
 
 
 
1a881d8
55ece2a
1a881d8
55ece2a
1a881d8
 
 
e335992
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from dataclasses import dataclass
from enum import Enum

class Model_Backbone(Enum):
    """Coarse backbone category for a submitted model.

    Any backbone string other than ``"Original"`` is grouped under
    ``Other`` for display purposes.
    """

    Original = "Original"
    Other = "Other"

    # NOTE: decorator added — without it, calling from_str on an enum
    # *member* (instead of the class) would bind the member as the
    # argument and always fall through to Other.
    @staticmethod
    def from_str(model_backbone: str) -> "Model_Backbone":
        """Map a free-form backbone string onto a known member."""
        if model_backbone == Model_Backbone.Original.value:
            return Model_Backbone.Original
        return Model_Backbone.Other

    @classmethod
    def format_for_leaderboard(cls, model_backbone: str) -> tuple:
        """Return ``(parsed member, raw string)`` as shown on the leaderboard."""
        return (cls.from_str(model_backbone), model_backbone)

class Training_Dataset(Enum):
    """Training dataset a model was trained on, normalised from aliases."""

    XCL = "BirdSet (XCL)"
    XCM = "BirdSet (XCM)"
    Dedicated = "BirdSet (Dedicated)"
    Other = "other"

    # NOTE: decorator added — without it, calling from_str on an enum
    # *member* would bind the member as the argument and always return
    # Other; with @staticmethod both call forms behave identically.
    @staticmethod
    def from_str(training_dataset: str) -> "Training_Dataset":
        """Map a free-form dataset string onto a member.

        Accepts the display value, the member name, a hyphenated variant,
        and the short lower/upper-case codes (dt/mt/lt); anything else
        falls back to ``Other``.
        """
        if training_dataset in [Training_Dataset.Dedicated.value, Training_Dataset.Dedicated.name, "BirdSet - Dedicated", "dt", "DT"]:
            return Training_Dataset.Dedicated
        if training_dataset in [Training_Dataset.XCM.value, Training_Dataset.XCM.name, "BirdSet - XCM", "mt", "MT"]:
            return Training_Dataset.XCM
        if training_dataset in [Training_Dataset.XCL.value, Training_Dataset.XCL.name, "BirdSet - XCL", "lt", "LT"]:
            return Training_Dataset.XCL
        return Training_Dataset.Other

    @classmethod
    def format_for_leaderboard(cls, training_dataset: str) -> tuple:
        """Return ``(parsed member, raw string)`` as shown on the leaderboard."""
        return (cls.from_str(training_dataset), training_dataset)

class Testing_Type(Enum):
    """BirdSet test split a submitted result was computed on.

    ``AVG`` is the average over all test datasets (see the submission
    instructions below); the remaining members are presumably the
    individual BirdSet test datasets, identified by their short codes
    (e.g. HSN, NBP, SSW) — confirm against the dataset card.
    """
    AVG = "avg"
    PER = "per"
    NES = "nes"
    UHH = "uhh"
    HSN = "hsn"
    NBP = "nbp"
    SSW = "ssw"
    SNE = "sne"


@dataclass
class Task:
    """A single evaluation metric displayed on the leaderboard."""
    metric: str  # metric key as it appears in the results json file
    col_name: str  # column name to display in the leaderboard


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    """The set of metrics reported for every submission."""
    # metric_key in the json file, name to display in the leaderboard 
    cmap = Task("cmap", "cmAP")
    auroc = Task("auroc", "AUROC")
    t1acc = Task("t1-acc", "T1-Acc")

# NOTE(review): presumably the number of few-shot examples used during
# evaluation (0 = zero-shot) — not referenced in this chunk; confirm usage.
NUM_FEWSHOT = 0
# ---------------------------------------------------



# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">BirdSet Leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This leaderboard accompanies the [BirdSet Dataset Collection](https://huggingface.co/datasets/DBD-research-group/BirdSet). You can find out more about BirdSet in the \"About\" Tab.
"""

# Which evaluations are you running? how can people reproduce what you have?
# (f-prefix removed: the string contains no placeholders.)
ABOUT_TEXT = """
## What is BirdSet
Deep learning models have emerged as a powerful tool in avian bioacoustics to assess environmental health.
To maximize the potential of cost-effective and minimal-invasive passive acoustic monitoring (PAM), models must analyze bird vocalizations across a wide range of species and environmental conditions.
However, data fragmentation challenges an evaluation of generalization performance.
Therefore, we introduce the BirdSet dataset, comprising approximately 520,000 global bird recordings for training and over 400 hours of PAM recordings for testing in a multi-label classification setting.  

You can find the datasets on [Huggingface](https://huggingface.co/datasets/DBD-research-group/BirdSet) and the code on [Github](https://github.com/DBD-research-group/BirdSet).
"""

EVALUATION_QUEUE_TEXT = """
## How to Submit a Model
First you need to evaluate your model on the BirdSet dataset.  
Then you can enter your evaluation information and submit a request.  
We will then check your request and approve it if everything is alright.

Please make sure that your model is publicly available so that we can check your results.

If you want to submit an average over all datasets then choose \"AVG\" as \"Tested on\".
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{rauch2024birdset,
      title={BirdSet: A Dataset and Benchmark for Classification in Avian Bioacoustics}, 
      author={Lukas Rauch and Raphael Schwinger and Moritz Wirth and René Heinrich and Denis Huseljic and Jonas Lange and Stefan Kahl and Bernhard Sick and Sven Tomforde and Christoph Scholz},
      year={2024},
      eprint={2403.10380},
      archivePrefix={arXiv},
      primaryClass={cs.SD},
      url={https://arxiv.org/abs/2403.10380}, 
}
"""