post aisf commit

Files changed:
- models/aisf/void_20230517_112128.pth +0 -0
- models/aisf/void_20230517_113634.pth +0 -0
- models/aisf/void_20230517_115313.pth +0 -0
- notebooks/AISF Audio Preprocessing.ipynb +0 -0
- notebooks/AISF Model Train and Eval.ipynb +0 -0
- notebooks/AISF War Room.ipynb +0 -0
- notebooks/playground.ipynb +0 -0
- server/main.py +1 -1
- train.py +11 -6
models/aisf/void_20230517_112128.pth
ADDED
Binary file (655 kB).

models/aisf/void_20230517_113634.pth
ADDED
Binary file (655 kB).

models/aisf/void_20230517_115313.pth
ADDED
Binary file (655 kB).
notebooks/AISF Audio Preprocessing.ipynb
ADDED
The diff for this file is too large to render.

notebooks/AISF Model Train and Eval.ipynb
ADDED
The diff for this file is too large to render.

notebooks/AISF War Room.ipynb
ADDED
The diff for this file is too large to render.

notebooks/playground.ipynb
CHANGED
The diff for this file is too large to render.
server/main.py
CHANGED

@@ -14,7 +14,7 @@ from cnn import CNNetwork
 
 # load model
 model = CNNetwork()
-state_dict = torch.load("../models/aisf/
+state_dict = torch.load("../models/aisf/void_20230517_113634.pth")
 model.load_state_dict(state_dict)
 
 # TODO: update to grabbing labels stored on model
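For reference, the load path in server/main.py after this change amounts to the short sketch below. CNNetwork comes from the repo's cnn module and the checkpoint path is the one in the diff; the map_location argument and the eval() call are assumptions added here for a CPU-only inference process, not part of the commit.

import torch
from cnn import CNNetwork

# Rebuild the architecture, then restore the trained weights from the
# checkpoint added in this commit.
model = CNNetwork()
state_dict = torch.load(
    "../models/aisf/void_20230517_113634.pth",
    map_location="cpu",  # assumption: serve on CPU; the diff does not pass this
)
model.load_state_dict(state_dict)
model.eval()  # assumption: switch to inference mode before serving requests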
train.py
CHANGED

@@ -12,10 +12,11 @@ from dataset import VoiceDataset
 from cnn import CNNetwork
 
 BATCH_SIZE = 128
-EPOCHS =
+EPOCHS = 10
 LEARNING_RATE = 0.001
 
 TRAIN_FILE="data/train"
+AISF_TRAIN_FILE="data/aisf/train"
 TEST_FILE="data/test"
 SAMPLE_RATE=48000
 
@@ -35,7 +36,7 @@ def train(model, train_dataloader, loss_fn, optimizer, device, epochs, test_data
 training_loss.append(train_epoch_loss/len(train_dataloader))
 training_acc.append(train_epoch_acc/len(train_dataloader))
 
-print("Training Loss: {:.2f}, Training Accuracy {
+print("Training Loss: {:.2f}, Training Accuracy {}".format(training_loss[i], training_acc[i]))
 
 if test_dataloader:
 # test model
@@ -45,7 +46,7 @@ def train(model, train_dataloader, loss_fn, optimizer, device, epochs, test_data
 testing_loss.append(test_epoch_loss/len(test_dataloader))
 testing_acc.append(test_epoch_acc/len(test_dataloader))
 
-print("Testing Loss: {:.2f}, Testing Accuracy {
+print("Testing Loss: {:.2f}, Testing Accuracy {}".format(testing_loss[i], testing_acc[i]))
 
 print ("-------------------------------------------- \n")
 
@@ -116,24 +117,28 @@ if __name__ == "__main__":
 n_mels=128
 )
 
-train_dataset = VoiceDataset(
+train_dataset = VoiceDataset(AISF_TRAIN_FILE, mel_spectrogram, device)
 train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
 
 # construct model
 model = CNNetwork().to(device)
 print(model)
+print(train_dataset.label_mapping)
 
 # init loss function and optimizer
 loss_fn = nn.CrossEntropyLoss()
-optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
+# optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
+optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9)
 
 
 # train model
 train(model, train_dataloader, loss_fn, optimizer, device, EPOCHS)
 
+model.label_mapping = train_dataset.label_mapping
+
 # save model
 now = datetime.now()
 now = now.strftime("%Y%m%d_%H%M%S")
-model_filename = f"models/void_{now}.pth"
+model_filename = f"models/aisf/void_{now}.pth"
 torch.save(model.state_dict(), model_filename)
 print(f"Trained void model saved at {model_filename}")
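One caveat worth noting about the new `model.label_mapping = train_dataset.label_mapping` line: `torch.save(model.state_dict(), ...)` serializes only registered parameters and buffers, so a plain Python attribute attached this way does not end up in the .pth file, and the server-side TODO about "grabbing labels stored on model" cannot read it back from this checkpoint. A hedged sketch of one way to carry the labels along with the weights is below; it reuses `model`, `train_dataset`, and `model_filename` from the script above, and the "state_dict" / "label_mapping" key names are assumptions, not something this commit introduces.

import torch
from cnn import CNNetwork

# Training side: bundle the weights and the dataset's label mapping in one file
# (assumed checkpoint layout; the commit itself saves only model.state_dict()).
torch.save(
    {
        "state_dict": model.state_dict(),
        "label_mapping": train_dataset.label_mapping,
    },
    model_filename,
)

# Server side: restore both pieces from the same checkpoint.
checkpoint = torch.load(model_filename, map_location="cpu")
model = CNNetwork()
model.load_state_dict(checkpoint["state_dict"])
labels = checkpoint["label_mapping"]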