File size: 3,201 Bytes
f116a97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import os
import re
import subprocess

import numpy as np
from tensorboardX import SummaryWriter

# TensorBoard event files are written under this run directory.
writer = SummaryWriter('runs/metrics_visualization')

# Log-line formats produced by the training script:
#   "Train epoch: <N> Learning rate: <lr>"          -> epoch_pattern
#   "Training loss: <x>"                            -> training_loss_pattern
#   "Instr <name> <metric>: <x> (Std: <s>)"         -> metric_pattern
#   "Metric avg <metric> : <x>"                     -> avg_metric_pattern
epoch_pattern = re.compile(r'Train epoch: (\d+) Learning rate: ([\d.eE+-]+)')
training_loss_pattern = re.compile(r'Training loss: ([\d.]+)')
metric_pattern = re.compile(r'(\w+ \w+ \w+): ([\d.]+) \(Std: ([\d.]+)\)')
avg_metric_pattern = re.compile(r'Metric avg (\w+)\s+: ([\d.]+)')

# Parsed series, keyed by category then metric name; each entry collects
# (epoch, value) pairs in log order.
data = {
    'common': {
        'learning_rate': [],
        'training_loss': []
    },
    'dry': {
        'Instr dry sdr': [],
        'Instr dry l1_freq': [],
        'Instr dry si_sdr': []
    },
    'other': {
        'Instr other sdr': [],
        'Instr other l1_freq': [],
        'Instr other si_sdr': []
    },
    'avg': {
        'Metric avg sdr': [],
        'Metric avg l1_freq': [],
        'Metric avg si_sdr': []
    }
}

# Standard deviations are tracked separately, mirroring the per-instrument
# metric keys above (avg metrics have no reported std).
std_data = {
    'dry': {key: [] for key in data['dry'].keys()},
    'other': {key: [] for key in data['other'].keys()}
}

# Parse the training log line by line, attributing every metric line to the
# most recently seen "Train epoch" header.
with open(r'E:\AI\datasets\msst\train.log', 'r', encoding='utf-8') as f:
    epoch = -1  # sentinel: no epoch header seen yet
    for line in f:
        # Epoch header: sets the current epoch and records the learning rate.
        epoch_match = epoch_pattern.match(line)
        if epoch_match:
            epoch = int(epoch_match.group(1))
            learning_rate = float(epoch_match.group(2))
            data['common']['learning_rate'].append((epoch, learning_rate))
            continue

        training_loss_match = training_loss_pattern.match(line)
        if training_loss_match:
            training_loss = float(training_loss_match.group(1))
            data['common']['training_loss'].append((epoch, training_loss))
            continue

        # Per-instrument metric with a standard deviation, e.g.
        # "Instr dry sdr: 8.12 (Std: 1.23)". Unknown instrument/metric
        # names are ignored.
        metric_match = metric_pattern.match(line)
        if metric_match:
            metric_name = metric_match.group(1)
            metric_value = float(metric_match.group(2))
            std_value = float(metric_match.group(3))

            if metric_name in data['dry']:
                data['dry'][metric_name].append((epoch, metric_value))
                std_data['dry'][metric_name].append((epoch, std_value))
            elif metric_name in data['other']:
                data['other'][metric_name].append((epoch, metric_value))
                std_data['other'][metric_name].append((epoch, std_value))
            continue

        # Averaged metric line, e.g. "Metric avg sdr : 7.5".
        avg_metric_match = avg_metric_pattern.match(line)
        if avg_metric_match:
            avg_metric_name = f'Metric avg {avg_metric_match.group(1)}'
            # Guard: the original code raised KeyError for any avg metric
            # not pre-declared in data['avg']; skip unknown names instead.
            if avg_metric_name in data['avg']:
                avg_metric_value = float(avg_metric_match.group(2))
                data['avg'][avg_metric_name].append((epoch, avg_metric_value))

def _log_series(category, series, suffix=''):
    """Write one category of metric series to TensorBoard.

    category: tag prefix ('common', 'dry', 'other', 'avg').
    series: mapping of metric name -> list of (epoch, value) pairs.
    suffix: appended to the tag, e.g. '_std' for standard deviations.
    """
    for key, values in series.items():
        # Tag: "<category>/<metric_name_with_underscores><suffix>".
        tag = f'{category}/{key.replace(" ", "_").lower()}{suffix}'
        for epoch, value in values:
            writer.add_scalar(tag, value, epoch)


# Mean values and standard deviations get distinct tags so TensorBoard
# plots them as separate curves.
for category, metrics in data.items():
    _log_series(category, metrics)

for category, metrics in std_data.items():
    _log_series(category, metrics, suffix='_std')

# Flush pending events and release the event-file handle before launching
# TensorBoard on the run directory.
writer.close()
# subprocess.run with an argument list avoids shell interpretation of the
# command (safer and more portable than os.system); check=False preserves
# the original os.system behavior of not raising on a non-zero exit.
subprocess.run(['tensorboard', '--logdir=runs'], check=False)