devashish-bhake committed · f9f5661
Parent: a351824
new file: SER_model/config.json
new file: SER_model/preprocessor_config.json
new file: SER_model/pytorch_model.bin
new file: SER_model/training_args.bin
new file: SPT_model/config.json
new file: SPT_model/preprocessor_config.json
new file: SPT_model/pytorch_model.bin
new file: SPT_model/special_tokens_map.json
new file: SPT_model/tokenizer_config.json
new file: SPT_model/vocab.json
new file: app.py
new file: flagged/audio/tmpqx7fc5l_.wav
new file: flagged/log.csv
new file: requirements.txt
new file: uploads/examples_03-01-01-01-01-02-05.wav
- SER_model/config.json +134 -0
- SER_model/preprocessor_config.json +9 -0
- SER_model/pytorch_model.bin +3 -0
- SER_model/training_args.bin +3 -0
- SPT_model/config.json +113 -0
- SPT_model/preprocessor_config.json +10 -0
- SPT_model/pytorch_model.bin +3 -0
- SPT_model/special_tokens_map.json +1 -0
- SPT_model/tokenizer_config.json +1 -0
- SPT_model/vocab.json +1 -0
- app.py +95 -0
- flagged/audio/tmpqx7fc5l_.wav +0 -0
- flagged/log.csv +2 -0
- requirements.txt +9 -0
- uploads/examples_03-01-01-01-01-02-05.wav +0 -0
SER_model/config.json
ADDED
@@ -0,0 +1,134 @@
+{
+  "_name_or_path": "./SPT_model",
+  "activation_dropout": 0.0,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 256,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": false,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_norm": "group",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.0,
+  "freeze_feat_extract_train": true,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "angry",
+    "1": "disgust",
+    "2": "fear",
+    "3": "happy",
+    "4": "neutral",
+    "5": "sad"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "angry": "0",
+    "disgust": "1",
+    "fear": "2",
+    "happy": "3",
+    "neutral": "4",
+    "sad": "5"
+  },
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.0,
+  "mask_channel_length": 10,
+  "mask_channel_min_space": 1,
+  "mask_channel_other": 0.0,
+  "mask_channel_prob": 0.0,
+  "mask_channel_selection": "static",
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_min_space": 1,
+  "mask_time_other": 0.0,
+  "mask_time_prob": 0.05,
+  "mask_time_selection": "static",
+  "model_type": "wav2vec2",
+  "no_mask_channel_overlap": false,
+  "no_mask_time_overlap": false,
+  "num_adapter_layers": 3,
+  "num_attention_heads": 12,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 12,
+  "num_negatives": 100,
+  "output_hidden_size": 768,
+  "pad_token_id": 0,
+  "proj_codevector_dim": 256,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 32,
+  "xvector_output_dim": 512
+}
+
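For reference, this config describes a wav2vec2 encoder with a 6-way emotion classification head. A minimal sketch of loading it, assuming the SER_model directory from this commit is available locally at ./SER_model:

# Hedged sketch: load the SER checkpoint described by the config above.
# The relative path "./SER_model" is an assumption, not part of the commit.
from transformers import AutoConfig, Wav2Vec2ForSequenceClassification

config = AutoConfig.from_pretrained("./SER_model")
model = Wav2Vec2ForSequenceClassification.from_pretrained("./SER_model").eval()
print(config.id2label)
# {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'neutral', 5: 'sad'}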
SER_model/preprocessor_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "return_attention_mask": false,
+  "sampling_rate": 16000
+}
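In practice this preprocessor config means the extractor expects 16 kHz mono float audio, normalizes it, and zero-pads on the right. A small illustrative sketch (again assuming the ./SER_model path):

# Hedged sketch of what the feature extractor does with raw audio.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("./SER_model")
wave = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz silence
batch = extractor(wave, sampling_rate=16000, return_tensors="pt", padding=True)
print(batch.input_values.shape)  # torch.Size([1, 16000])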
SER_model/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:583aea2bf31f8713200e46bd9f1c6735beed40b5ec0d1f6e2decd45c6446e1b6
+size 378352659
SER_model/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c08295d554ee360c4ed5b8090035b4384199468399ca15720321255bf7bf7b3
+size 3503
SPT_model/config.json
ADDED
@@ -0,0 +1,113 @@
+{
+  "activation_dropout": 0.1,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ConformerForCTC"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 768,
+  "conformer_conv_dropout": 0.1,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": true,
+  "conv_depthwise_kernel_size": 31,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": true,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "layer",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "swish",
+  "hidden_dropout": 0.1,
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.0,
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.05,
+  "max_source_positions": 5000,
+  "model_type": "wav2vec2-conformer",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 0,
+  "position_embeddings_type": "relative",
+  "proj_codevector_dim": 768,
+  "rotary_embedding_base": 10000,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.19.0.dev0",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 32,
+  "xvector_output_dim": 512
+}
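This second checkpoint is a 24-layer wav2vec2-conformer fine-tuned for CTC transcription (the "SPT" speech-to-text model used by app.py below). A hedged end-to-end sketch, assuming ./SPT_model holds the files from this commit and using the example WAV it ships:

# Hedged sketch: transcribe the bundled example clip with the SPT model.
import torch
import librosa
from transformers import Wav2Vec2Processor, Wav2Vec2ConformerForCTC

processor = Wav2Vec2Processor.from_pretrained("./SPT_model")
model = Wav2Vec2ConformerForCTC.from_pretrained("./SPT_model").eval()

audio, _ = librosa.load("uploads/examples_03-01-01-01-01-02-05.wav", sr=16000)
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    ids = model(inputs.input_values).logits.argmax(dim=-1)
print(processor.batch_decode(ids)[0])  # uppercase transcript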
SPT_model/preprocessor_config.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0,
+  "processor_class": "Wav2Vec2Processor",
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
SPT_model/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f8342d778e99445a2a3923b248235e2cb8c2b85ac7574d00495d2329b0fe4b6
+size 2474878081
SPT_model/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
SPT_model/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2Processor"}
SPT_model/vocab.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 1, "<pad>": 0, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}
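The vocabulary is 32 entries: uppercase letters plus specials, with "|" as the word delimiter and "<pad>" (id 0) doubling as the CTC blank. A toy sketch of the greedy decode that processor.batch_decode performs over these ids, using a truncated subset of the vocab for brevity:

# Toy greedy CTC decode: collapse repeats, drop blanks, map "|" to spaces.
vocab = {"<pad>": 0, "|": 4, "E": 5, "T": 6}  # subset of SPT_model/vocab.json
id2char = {i: c for c, i in vocab.items()}

def greedy_ctc_decode(ids, blank=0):
    out, prev = [], None
    for i in ids:
        if i != blank and i != prev:  # skip blanks and repeated frames
            out.append(id2char[i])
        prev = i
    return "".join(out).replace("|", " ")

print(greedy_ctc_decode([6, 6, 0, 5, 4, 6, 5]))  # "TE TE"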
app.py
ADDED
@@ -0,0 +1,95 @@
+import torch
+import torch.nn.functional as F
+import torchaudio
+from transformers import AutoConfig, Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification, Wav2Vec2Processor, Wav2Vec2ConformerForCTC
+import librosa
+import gradio as gr
+
+
+def speech_file_to_array_fn(path, sampling_rate):
+    # Load the file and resample it to the rate the model expects.
+    speech_array, _sampling_rate = torchaudio.load(path)
+    resampler = torchaudio.transforms.Resample(_sampling_rate, sampling_rate)
+    speech = resampler(speech_array).squeeze().numpy()
+    return speech
+
+
+def predict(path, sampling_rate, feature_extractor, device, model, config):
+    # Run the SER classifier and return one {Emotion, Score} dict per label.
+    speech = speech_file_to_array_fn(path, sampling_rate)
+    inputs = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
+    inputs = {key: inputs[key].to(device) for key in inputs}
+    with torch.no_grad():
+        logits = model(**inputs).logits
+    scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
+    outputs = [{"Emotion": config.id2label[i], "Score": f"{score * 100:.1f}%"} for i, score in enumerate(scores)]
+    return outputs
+
+
+def get_speech_to_text(model, processor, audio_path):
+    # CTC transcription with the SPT (speech-to-text) conformer model.
+    data, sample_rate = librosa.load(audio_path, sr=16000)
+    input_values = processor(data, sampling_rate=16000, return_tensors="pt", padding="longest").input_values
+    with torch.no_grad():
+        logits = model(input_values).logits
+    predicted_ids = torch.argmax(logits, dim=-1)
+    transcription = processor.batch_decode(predicted_ids)
+    return transcription
+
+
+# def get_percentage_match(transcription, text):
+#     return jellyfish.damerau_levenshtein_distance(transcription, text)
+
+
+def get_sos_status(transcription, key_phrase):
+    # SOS fires only when every word of the key phrase appears in the transcript.
+    words = key_phrase.split(" ")
+    ct = 0
+    for word in words:
+        if transcription[0].find(word) != -1:
+            ct += 1
+    return 1 if ct == len(words) else 0
+
+
+def main(audio):
+    # NOTE: both checkpoints are reloaded on every call. The hard-coded local
+    # Windows paths from the original are kept as raw strings; they should
+    # point at the SER_model/ and SPT_model/ directories in this repo.
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    SPT_MODEL = r"D:\kaggle_practice\KJSCE_hack\SERModel\SPT_model"
+    model_name_or_path = r"D:\kaggle_practice\KJSCE_hack\SERModel\SER_model"
+    config = AutoConfig.from_pretrained(model_name_or_path)
+    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name_or_path)
+    sampling_rate = feature_extractor.sampling_rate
+    model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name_or_path).to(device)
+    processor = Wav2Vec2Processor.from_pretrained(SPT_MODEL)
+    model_SPT = Wav2Vec2ConformerForCTC.from_pretrained(SPT_MODEL)
+    # path = r'testing_audios\03-01-06-02-02-01-01.wav'
+    outputs = predict(audio, sampling_rate, feature_extractor, device=device, model=model, config=config)
+    transcription = get_speech_to_text(model_SPT, processor, audio_path=audio)
+    key_phrase = "DOGS DOOR SITTING"
+    status = get_sos_status(transcription, key_phrase)
+
+    # Pick the top-scoring emotion; "Score" is a percentage string like "93.2%".
+    max_score = 0
+    emotion = ""
+    for i in outputs:
+        if float(i["Score"][:-1]) > max_score:
+            max_score = float(i["Score"][:-1])
+            emotion = i["Emotion"]
+    # Collapse the six labels into a coarse polarity; "angry" and "happy"
+    # both fall through to "positive" here.
+    if emotion in ["disgust", "fear", "sad"]:
+        emotion = "negative"
+    elif emotion == "neutral":
+        emotion = "neutral"
+    else:
+        emotion = "positive"
+
+    # Raise the SOS flag on a negative emotion or a key-phrase match.
+    sos = 1 if emotion == "negative" or status == 1 else 0
+
+    return [emotion, transcription, sos]
+
+
+gr.Interface(
+    fn=main,
+    inputs=[
+        gr.inputs.Audio(source="upload", type="filepath")
+    ],
+    outputs=[
+        "textbox"
+    ],
+    live=True).launch(server_port=5000)
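The SOS trigger above depends on the transcript containing the key phrase "DOGS DOOR SITTING". A standalone illustration of that check, with hypothetical transcripts:

# Hedged sketch of the key-phrase check, separate from the app above.
def sos_from_transcript(transcript, key_phrase="DOGS DOOR SITTING"):
    # Fires only when every word of the key phrase appears in the transcript.
    return 1 if all(w in transcript for w in key_phrase.split()) else 0

print(sos_from_transcript("THE DOGS ARE SITTING NEAR THE DOOR"))  # 1
print(sos_from_transcript("HELLO THERE"))                         # 0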
flagged/audio/tmpqx7fc5l_.wav
ADDED
Binary file (398 kB)
flagged/log.csv
ADDED
@@ -0,0 +1,2 @@
+audio,output,flag,username,timestamp
+D:\kaggle_practice\KJSCE_hack\SERModel\flagged\audio\tmpqx7fc5l_.wav,,,,2023-04-09 15:47:42.154241
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+flask
+torch==1.13.1
+torchvision==0.14.1
+torchaudio==0.13.1
+transformers
+librosa
+jellyfish
+werkzeug
+panel
uploads/examples_03-01-01-01-01-02-05.wav
ADDED
Binary file (398 kB)