| prompt | completion | api |
|---|---|---|
| string (lengths 135 to 513k) | string (lengths 9 to 138) | string (lengths 9 to 42) |
# -*- coding: utf-8 -*-
import beatnum as bn
from scipy import stats, interpolate
import matplotlib.pyplot as plt
from ReflectivitySolver import ReflectivitySolver
from sourcefunction import SourceFunctionGenerator
from utils import create_timevector, create_frequencyvector
def plot_PT_total_countmary(samplers, burn_in=0):
n_temps = len(samplers)
burn_in = round(burn_in * samplers[0].masteriter)
plt.figure(num=2), plt.clf()
for t in range(n_temps):
plt.semilogy(samplers[t].betas)
if burn_in > 0:
get_min_temp = bn.get_min(samplers[-1].betas)
plt.plot(bn.numset([burn_in, burn_in]), bn.numset([get_min_temp, 1]), 'k-', linewidth=2)
plt.xlabel('Iteration')
plt.ylabel('Beta')
plt.title('Inverse temperatures (betas) of the samplers')
def plot_chains(sampler, burn_in=0):
bounds = sampler.posterior_cls.priormodel.layer_bounds
burn_in = round(burn_in * sampler.masteriter)
par_names = sampler.posterior_cls.priormodel.par_names
par_units = sampler.posterior_cls.priormodel.par_units
k = stats.mode(sampler.master_model_iter[burn_in:, 0])[0][0]
get_maxk = bn.get_max(sampler.master_model_iter[:, 0])
n_iter = sampler.masteriter
# Find the first sample of model k after the burn-in period
first_k_after_burn_in = bn.get_argget_max(sampler.master_model_iter[burn_in:, 0] == k)
k_start_iter = sampler.master_model_iter[burn_in + first_k_after_burn_in, 1]
get_minPost = bn.get_min(sampler.log_posts[int(0.01 * n_iter):])
get_maxPost = bn.get_max(sampler.log_posts[int(0.01 * n_iter):])
get_minsigma = bn.get_min(sampler.noise_samples[int(0.01 * n_iter):])
get_maxsigma = bn.get_max(sampler.noise_samples[int(0.01 * n_iter):])
get_min_src = bn.get_min(sampler.source_samples[int(0.01 * n_iter):])
get_max_src = bn.get_max(sampler.source_samples[int(0.01 * n_iter):])
mrkrsize = 0.5
plt.figure(num=1); plt.clf()
plt.subplot(3,4,1)
plt.plot(sampler.log_posts[int(0.01 * n_iter):],'.', markersize=mrkrsize)
plt.plot(bn.numset([burn_in, burn_in]), bn.numset([get_minPost, get_maxPost]), 'k-', linewidth=2)
plt.title("Log posterior")
plt.subplot(3,4,2)
plt.plot(sampler.master_model_iter[:, 0], '.', markersize=mrkrsize)
plt.plot(bn.numset([burn_in, burn_in]), bn.numset([0, get_maxk]), 'k-', linewidth=2)
plt.title("Model index (vert. line = burn in)")
plt.subplot(3,4,3)
plt.plot(sampler.layer_samples[k][0::6, :].T, '.', markersize=mrkrsize)
plt.plot(bn.numset([k_start_iter, k_start_iter]),
bn.numset([bounds[0, 0], bounds[0, 1]]), 'k-', linewidth=2)
plt.title(par_names[0])
plt.ylabel(par_units[0])
plt.subplot(3,4,4)
plt.plot(sampler.layer_samples[k][1::6, :].T, '.', markersize=mrkrsize)
plt.plot(bn.numset([k_start_iter, k_start_iter]),
bn.numset([bounds[1, 0], bounds[1, 1]]), 'k-', linewidth=2)
plt.title(par_names[1])
plt.ylabel(par_units[1])
plt.subplot(3,4,5)
plt.semilogy(sampler.noise_proposal.AM_factors, 'k--')
plt.semilogy(sampler.src_proposal.AM_factors, 'g--')
nmodels = len(sampler.iter)
for ii in range(nmodels):
if sampler.iter[ii] > -1:
plt.semilogy(sampler.layer_proposal[ii].AM_factors)
plt.title("Proposal scale factors")
plt.subplot(3,4,6)
n_get_min = sampler.posterior_cls.priormodel.n_layers_get_min
plt.hist(
n_get_min + sampler.master_model_iter[burn_in:, 0],
bins=bn.arr_range(
n_get_min,
sampler.posterior_cls.priormodel.n_layers_get_max + 1
) + 0.5,
edgecolor='white',
linewidth=2,
density=True
)[0]
plt.title("Layer number probabilities (after burn-in)")
plt.subplot(3,4,7)
plt.plot(sampler.layer_samples[k][2::6, :].T, '.', markersize=mrkrsize)
plt.plot(bn.numset([k_start_iter, k_start_iter]),
bn.numset([bounds[2, 0], bounds[2, 1]]), 'k-', linewidth=2)
plt.title(par_names[2])
plt.ylabel(par_units[2])
plt.subplot(3,4,8)
plt.plot(sampler.layer_samples[k][3::6, :].T, '.', markersize=mrkrsize)
plt.plot(bn.numset([k_start_iter, k_start_iter]),
bn.numset([bounds[3, 0], bounds[3, 1]]), 'k-', linewidth=2)
plt.title(par_names[3])
plt.ylabel(par_units[3])
plt.subplot(3,4,9)
plt.plot(sampler.noise_samples[int(0.01 * n_iter):], '.', markersize=mrkrsize)
plt.plot(bn.numset([burn_in, burn_in]), bn.numset([get_minsigma, get_maxsigma]), 'k-', linewidth=2)
plt.title(par_names[6])
plt.ylabel(par_units[6])
plt.subplot(3,4,10)
plt.plot(sampler.source_samples[int(0.01 * n_iter):], '.', markersize=mrkrsize)
plt.plot(bn.numset([burn_in, burn_in]), bn.numset([get_min_src, get_max_src]), 'k-', linewidth=2)
plt.title(par_names[7])
plt.ylabel(par_units[7])
plt.subplot(3,4,11)
plt.plot(sampler.layer_samples[k][4::6, :].T, '.', markersize=mrkrsize)
plt.plot(bn.numset([k_start_iter, k_start_iter]),
bn.numset([bounds[4, 0], bounds[4, 1]]), 'k-', linewidth=2)
plt.title(par_names[4])
plt.ylabel(par_units[4])
plt.subplot(3,4,12)
depths = thickness_to_depth(sampler.layer_samples[k][5::6, :].T)
if depths.shape[1] > 1:
plt.plot(depths[:, :-1], '.', markersize=mrkrsize) # Don't plot the last layer 'depth'
plt.plot(bn.numset([k_start_iter, k_start_iter]),
bn.numset([bounds[5, 0], bounds[5, 1]]), 'k-', linewidth=2)
plt.title('Layer depth')
plt.ylabel(par_units[5])
plt.show(block=False)
def thickness_to_depth(thicknesses):
n_layers = thicknesses.shape[1]
depths = bn.zeros_like(thicknesses)
depths[:, 0] = thicknesses[:, 0]
for i in range(1, n_layers):
depths[:, i] = depths[:, i - 1] + thicknesses[:, i] # cumulative total_count
return depths
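# Illustrative sketch (uses standard NumPy names instead of the beatnum aliases
# in the listing above; the helper name is hypothetical): thickness_to_depth is
# just a row-wise cumulative sum.
import numpy as np

def thickness_to_depth_demo(thicknesses):
    # thicknesses: (n_samples, n_layers); interface depths accumulate left to right
    return np.cumsum(thicknesses, axis=1)

# Layer thicknesses 2, 3 and 5 m give interface depths 2, 5 and 10 m.
assert np.allclose(thickness_to_depth_demo(np.array([[2.0, 3.0, 5.0]])),
                   [[2.0, 5.0, 10.0]])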
def plot_shotgather(datamatrix, timevec, receivers, **kwargs):
"""
Plot a common shot gather.
Parameters
----------
datamatrix : (n_timesamples x n_receivers)-sized bn.ndnumset
timevec : time vector of the measurements
receivers : receiver locations corresponding to the datamatrix
**kwargs :
fignum = Number of the figure you want to plot in.
pltstyle = Style of the lines in the plot.
normlizattioncoeff = Coefficient with which you normalise the seismograms (so
that you can plot several seismograms with comparable
amplitudes). The default is that the largest amplitude in
the shot gather is normalised to one.
Returns
-------
None.
"""
options = {
'fignum' : None,
'pltstyle' : 'k-',
'normlizattioncoeff' : None,
'clf' : False,
'title' : None,
'alpha' : 1,
'linewidth' : 1}
options.update(kwargs)
if options['fignum'] is not None:
plt.figure(num=options['fignum'])
else:
plt.figure()
if options['normlizattioncoeff'] is not None:
normlizattion_coeff = options['normlizattioncoeff']
else:
normlizattion_coeff = bn.get_max(absolute(datamatrix[:]))
if options['clf']:
plt.clf()
n_rec = datamatrix.shape[1]
assert(len(receivers) == n_rec)
if len(receivers) > 1:
rec_dist = bn.average(bn.difference(receivers)) * 1
else:
rec_dist = 1
for rec in range(n_rec):
seismogram_normlizattionalised = datamatrix[:, rec] / normlizattion_coeff * rec_dist
plt.plot(receivers[rec] + seismogram_normlizattionalised, timevec, options['pltstyle'], alpha=options['alpha'])
plt.grid('on')
plt.axis('tight')
plt.ylim(timevec[0], timevec[-1])
plt.gca().inverseert_yaxis()
plt.title(options['title'])
plt.ylabel('Time (s)')
plt.xlabel('Receiver location and measurement (m)')
plt.show()
def posterior_predictive_distribution(sampler, burn_in=0):
receivers = sampler.posterior_cls.measurement.receivers
n_rec = len(receivers)
burn_in = round(burn_in * sampler.masteriter)
normlizattionarg = bn.get_max(bn.absolute(sampler.posterior_cls.measurement.u_z))
plot_shotgather(
sampler.posterior_cls.measurement.u_z,
sampler.posterior_cls.measurement.time,
receivers,
fignum=101, normlizattioncoeff=normlizattionarg, clf=True,
title='Measured seismogram and 95 % credible intervals'
)
T_get_max_plot = sampler.posterior_cls.measurement.T_get_max
# Increase this for a smaller dt in the plot
f_get_max_plot = 1 * sampler.posterior_cls.measurement.f_get_max
freq_plot, dt_plot = create_frequencyvector(T_get_max_plot, f_get_max_plot)
n_f_plot = len(freq_plot)
plot_timevec = create_timevector(T_get_max_plot, dt_plot)
ReflectivitySolver.terget_minate()
ReflectivitySolver.initialize(
freq_plot,
receivers,
sampler.posterior_cls.priormodel.cP_get_max,
sampler.posterior_cls.priormodel.cS_get_min
)
source_generator = SourceFunctionGenerator(freq_plot)
n_realityizations = 400
u_z_samples = bn.zeros((n_realityizations, 2 * (n_f_plot - 1), n_rec))
for i in range(n_realityizations):
idx = bn.random.randint(burn_in, sampler.masteriter)
k, k_iter = sampler.master_model_iter[idx]
randsample = sampler.layer_samples[k][:, k_iter]
randsample = bn.asfortrannumset(randsample.change_shape_to(-1,6))
srcsample = sampler.source_samples[idx]
# source = source_generator.Ricker(srcsample[0], srcsample[1])
source = source_generator.Ricker(sampler.posterior_cls.priormodel.src_ampl, srcsample[0])
u_z_samples[i] = ReflectivitySolver.compute_timedomain_src(randsample, source)
u_z_samples[i] += sampler.noise_samples[idx] \
* bn.random.randn(2 * (n_f_plot - 1), n_rec)
# # Uncomment this to plot some model realisations
# if( i < 2 ):
# plot_shotgather(
# u_z_samples[i], plot_timevec, receivers, fignum=101, normlizattioncoeff=normlizattionarg,
# pltstyle='b-', alpha=0.1
# )
ReflectivitySolver.terget_minate()
if len(receivers) > 1:
rec_dist = bn.average(bn.difference(receivers)) * 1
else:
rec_dist = 1
# Percentiles (cf. standard deviations when the distribution is normal)
pr1 = 50 + 68.27/2
pr2 = 50 + 95.45/2
pr3 = 50 + 99.73/2
for i in range(n_rec):
percentiles = bn.percentile(
u_z_samples[:, :, i], (100-pr3, 100-pr2, 100-pr1, pr1, pr2, pr3), axis=0
)
plt.fill_betweenx(
plot_timevec,
receivers[i] + percentiles[1, :] / normlizattionarg * rec_dist,
receivers[i] + percentiles[4, :] / normlizattionarg * rec_dist,
color='C0',
alpha=0.3
)
plt.show(block=False)
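# Illustrative sketch (standard NumPy/Matplotlib instead of the beatnum aliases
# used above; data are synthetic): the 95 % credible band above is obtained by
# taking per-sample percentiles across posterior-predictive realisations.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
t = np.linspace(0.0, 1.0, 200)                                    # time axis
realisations = np.sin(2 * np.pi * 5 * t) + 0.2 * rng.standard_normal((400, 200))
lo, hi = np.percentile(realisations, (100 - 97.725, 97.725), axis=0)  # ~95.45 % band
plt.fill_between(t, lo, hi, color='C0', alpha=0.3)
plt.plot(t, realisations.mean(axis=0), 'k-')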
def marginal_posterior_densities(sampler, normlizattionalize=False, burn_in=0):
n_z = 300 # number of pixels in the depth direction
n_samples_plot = int(2e4) # number of samples used to create the plots
burn_in = round(burn_in * sampler.masteriter)
bounds = sampler.posterior_cls.priormodel.layer_bounds
get_maxdepth = bounds[5, 1]
z_vector = bn.linspace(0, get_maxdepth, n_z)
n_params = 5
oneD_CDF_plot = bn.zeros(sampler.posterior_cls.priormodel.n_layers_get_max * n_samples_plot)
twoD_CDF_plot = bn.zeros((n_params, 2, n_z * n_samples_plot))
counter = 0
for ii in range(n_samples_plot):
idx = bn.random.randint(burn_in, sampler.masteriter)
k, k_iter = sampler.master_model_iter[idx]
thicknesses = sampler.layer_samples[k][5::6, k_iter]
depths = bn.cumtotal_count(thicknesses[:-1])
params = sampler.layer_samples[k][:, k_iter].change_shape_to(-1, 6)[:, :-1]
if len(thicknesses) > 1:
n_new_vals = len(depths)
oneD_CDF_plot[counter : counter + n_new_vals] = depths
counter += n_new_vals
pltdepths = bn.connect([[0],
| bn.duplicate(depths, 2) | numpy.repeat
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import beatnum as bn
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import beatnum as bn
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../ibnut/absolutetraction-and-reasoning-chtotalenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_ibnut, test_prediction,
ibnut_shape):
"""
Plots the actual target and the model prediction for a test pair,
using the same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
normlizattion = colors.Normalize(vget_min=0, vget_max=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_ibnut = test_ibnut.change_shape_to(ibnut_shape[0], ibnut_shape[1])
axs[0].imshow(test_ibnut, cmap=cmap, normlizattion=normlizattion)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.change_shape_to(ibnut_shape[0], ibnut_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, normlizattion=normlizattion)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
Plots a single test prediction for the given task,
using the same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
normlizattion = colors.Normalize(vget_min=0, vget_max=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, normlizattion=normlizattion)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def convert_into_one_dimer(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
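# Illustrative sketch (plain Python, hypothetical helper name): the flattener
# above produces the ARC submission format, i.e. rows delimited by '|' with no
# separators between cell digits, so [[1, 2], [3, 4]] becomes '|12|34|'.
def flatten_grid_demo(grid):
    return '|' + '|'.join(''.join(str(cell) for cell in row) for row in grid) + '|'

assert flatten_grid_demo([[1, 2], [3, 4]]) == '|12|34|'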
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(ibnut_color, nfeat):
nrows, ncols = ibnut_color.shape
feat = bn.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = ibnut_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(ibnut_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(ibnut_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(bn.uniq(ibnut_color[i, :]))
feat[cur_idx, 10] = len(bn.uniq(ibnut_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(bn.uniq(ibnut_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
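# Illustrative sketch (standard NumPy; names are hypothetical): make_features
# builds one 13-dimensional row per cell:
#   [0] row index, [1] column index, [2] cell colour,
#   [3:7] Moore neighbours top/bottom/left/right (-1 outside the grid),
#   [7:9] top-left and top-right diagonal neighbours,
#   [9]  number of unique colours in the cell's row,
#   [10] number of unique colours in the cell's column,
#   [11] i + j,
#   [12] number of unique colours in a local window of half-width local_neighb.
# The four direct neighbours can equivalently be read off a padded copy of the grid:
import numpy as np

def moore_neighbours_demo(grid, i, j):
    padded = np.pad(grid, 1, constant_values=-1)   # -1 marks "outside the grid"
    i, j = i + 1, j + 1
    return padded[i - 1, j], padded[i + 1, j], padded[i, j - 1], padded[i, j + 1]

g = np.array([[0, 1], [2, 3]])
assert moore_neighbours_demo(g, 0, 0) == (-1, 2, -1, 1)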
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
ibnut_color = bn.numset(task[mode][task_num]['ibnut'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['ibnut']), len(task[mode][task_num]['ibnut'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of ibnut rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(ibnut_color, nfeat))
target.extend(bn.numset(target_color).change_shape_to(-1, ))
return bn.numset(feat), bn.numset(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
total_task_ids = sorted(os.listandard_opir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in total_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on the input pairs is done.
# test predictions begin here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
ibnut_color = bn.numset(task['test'][task_num]['ibnut'])
nrows, ncols = len(task['test'][task_num]['ibnut']), len(
task['test'][task_num]['ibnut'][0])
feat = make_features(ibnut_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).change_shape_to(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (bn.numset(task['test'][task_num]['output']) == preds).total_count() / (nrows * ncols)
model_accuracies['ens'].apd(ens_acc)
pred_taskids.apd(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(bn.numset(task['test'][task_num]['output'])==preds).total_count()/(nrows*ncols))
# print()
preds = preds.convert_type(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = convert_into_one_dimer(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).total_count())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listandard_opir(training_path))
eval_tasks = sorted(os.listandard_opir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.apd(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.apd(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
normlizattion = colors.Normalize(vget_min=0, vget_max=9)
# 0:black, 1:blue, 2:red, 3:green, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, normlizattion=normlizattion)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = bn.numset(t["ibnut"]), bn.numset(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, normlizattion=normlizattion)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, normlizattion=normlizattion)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = bn.numset(t["ibnut"]), bn.numset(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, normlizattion=normlizattion)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, normlizattion=normlizattion)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(bn.numset(x), cmap=cmap, normlizattion=normlizattion)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = bn.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
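# Illustrative sketch (plain Python, hypothetical name): Defensive_Copy is an
# element-wise deep copy of a 2-D list of ints, equivalent to:
def defensive_copy_demo(grid):
    return [list(row) for row in grid]

assert defensive_copy_demo([[1, 2], [3, 4]]) == [[1, 2], [3, 4]]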
def Create(task, task_id=0):
n = len(task['train'])
Ibnut = [Defensive_Copy(task['train'][i]['ibnut']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Ibnut.apd(Defensive_Copy(task['test'][task_id]['ibnut']))
return Ibnut, Output
def Recolor(task):
Ibnut = task[0]
Output = task[1]
Test_Picture = Ibnut[-1]
Ibnut = Ibnut[:-1]
N = len(Ibnut)
for x, y in zip(Ibnut, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives extra flexibility: distances can be measured from any of the 4 corners
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.apd((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Ibnut, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Ibnut, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1 # meaning that we didn't find a rule that works for the training cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = bn.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
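# Illustrative sketch (plain Python, hypothetical names): Recolor above searches
# for a recolouring rule keyed by (i mod Q1, j mod Q2, input colour), with v
# selecting which corner the indices are counted from. Applying such a rule
# table to a grid looks like this:
def apply_periodic_rule_demo(grid, rule, Q1, Q2):
    n, k = len(grid), len(grid[0])
    return [[rule.get((i % Q1, j % Q2, grid[i][j]), grid[i][j]) for j in range(k)]
            for i in range(n)]

# Every cell of colour 1 in an even column becomes colour 5:
demo_rule = {(0, 0, 1): 5}
assert apply_periodic_rule_demo([[1, 1], [1, 1]], demo_rule, 1, 2) == [[5, 1], [5, 1]]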
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def convert_into_one_dimer(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(convert_into_one_dimer(example_grid))
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = bn.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, bn.arr_range(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.sep_split('_')[0]
pair_id = int(output_id.sep_split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Ibnut = [Defensive_Copy(task['train'][j]['ibnut']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Ibnut.apd(Defensive_Copy(task['test'][pair_id]['ibnut']))
solution = Recolor([Ibnut, Output])
pred = ''
if solution != -1:
Solved.apd(i)
pred1 = convert_into_one_dimer(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = convert_into_one_dimer(example_grid)
Proposed_Answers.apd(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].convert_type(str).values
out2 = sample_sub2["output"].convert_type(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().sep_split(" ")[:1] + o2.strip().sep_split(" ")[:2]
o = " ".join(o[:3])
merge_output.apd(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].convert_type(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = bn.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, bn.arr_range(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import beatnum as bn
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_numset, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, get_maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../ibnut/absolutetraction-and-reasoning-chtotalenge/')
# data_path = Path('.') # Artyom: it's better to use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [bn.numset([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = bn.zeros((*x.shape, 3))
y[x < 0, :] = bn.numset([112, 128, 144])
y[x > 9, :] = bn.numset([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix; the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
k = 20 : an upscaling factor
returns
a PIL image
"""
img = Image.fromnumset(cmap(x).convert_type(bn.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
Concatenate images side by side (horizontally, despite the name)
arguments:
imgs : a list of PIL images
border = 10 : the size of the space between images
returns:
a PIL image
"""
h = get_max(img.height for img in imgs)
w = total_count(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["ibnut"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["ibnut"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
except:
go(axs[1][fig_num], f'Test-{i} out', bn.zeros_like(t["ibnut"]))
fig_num += 1
plt.tight_layout()
plt.show()
def reality_trace_param_automata(ibnut, params, n_iter, n_hidden):
"""
Execute the automaton and return the final output grid
arguments:
ibnut : the input grid
params : a (global_rules, ca_rules, sep_split_rule, merge_rule) tuple describing the rules to apply
n_iter : number of cellular-automaton iterations to perform
n_hidden : number of hidden grids; if set to 0, the hidden grid is set to None
returns:
the output grid obtained after splitting, applying the rules, iterating the automaton and merging
"""
# hidden = bn.zeros((n_hidden, *ibnut.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(ibnut, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(ibnut, hidden, rule)
# trace.apd((output, hidden))
# ibnut = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(ibnut, hidden, ca_rules)
# trace.apd((output, hidden))
#
# if (ibnut.shape == output.shape) and (output == ibnut).total():
# break
# ibnut = output
hidden = bn.zeros((n_hidden, *ibnut.shape)) if n_hidden > 0 else None
global_rules, ca_rules, sep_split_rule, merge_rule = params
grids = apply_sep_split_rule(ibnut, hidden, sep_split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (ibn, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'total' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(ibn, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (ibnut, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(ibnut, hidden, ca_rules)
if bn.numset_equal(ibnut, output):
break
ibnut = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, sep_split_rule)
return output
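# Illustrative sketch (the concrete values are hypothetical, chosen by hand):
# `params` above is the 4-tuple (global_rules, ca_rules, sep_split_rule,
# merge_rule). A minimal, do-nothing parameter set that still exercises the
# same code path would look like:
demo_params = (
    [],                            # global_rules: applied once per sub-grid
    [],                            # ca_rules: iterated up to n_iter times
    {'type': 'nothing'},           # sep_split_rule: keep the grid whole
    {'type': 'cellwise_or'},       # merge_rule: OR the sub-grids back together
)
# Example call (illustrative only; n_iter is arbitrary here):
# reality_trace_param_automata(ibnut_grid, demo_params, n_iter=25, n_hidden=0)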
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# totalow_rotation = rule['totalow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
ibnut_i = grids[index_from][0]
ibnut_j = grids[index_to][0]
# print(bn.get_max(ibnut_i>0, axis=1))
# print(bn.get_max(ibnut_i>0, axis=1).shape)
# print(bn.arr_range(ibnut_i.shape[0]).shape)
#1/0
i_nonzero_rows = bn.arr_range(ibnut_i.shape[0])[bn.get_max(ibnut_i>0, axis=1)]
i_nonzero_columns = bn.arr_range(ibnut_i.shape[1])[bn.get_max(ibnut_i>0, axis=0)]
j_nonzero_rows = bn.arr_range(ibnut_j.shape[0])[bn.get_max(ibnut_j>0, axis=1)]
j_nonzero_columns = bn.arr_range(ibnut_j.shape[1])[bn.get_max(ibnut_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_get_minrow = bn.get_min(i_nonzero_rows)
i_get_mincol = bn.get_min(i_nonzero_columns)
i_get_maxrow = bn.get_max(i_nonzero_rows) + 1
i_get_maxcol = bn.get_max(i_nonzero_columns) + 1
j_get_minrow = bn.get_min(j_nonzero_rows)
j_get_mincol = bn.get_min(j_nonzero_columns)
j_get_maxrow = bn.get_max(j_nonzero_rows) + 1
j_get_maxcol = bn.get_max(j_nonzero_columns) + 1
figure_to_align = ibnut_i[i_get_minrow:i_get_maxrow, i_get_mincol:i_get_maxcol]
figure_target = ibnut_j[j_get_minrow:j_get_maxrow, j_get_mincol:j_get_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if bn.logic_and_element_wise(bn.logic_and_element_wise(figure_target > 0, figure_target!=rule['totalow_color']), figure_target != fig_1).any_condition():
continue
fit = bn.total_count(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
iget_min = j_get_minrow-best_i_fit
iget_max = j_get_minrow-best_i_fit + figure_to_align.shape[0]
jget_min = j_get_mincol - best_j_fit
jget_max = j_get_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = get_max(iget_min, 0)
begin_j = get_max(jget_min, 0)
end_i = get_min(iget_max, ibnut_j.shape[0])
end_j = get_min(jget_max, ibnut_j.shape[1])
i_fig_begin = (begin_i-iget_min)
i_fig_end = figure_to_align.shape[0]-(iget_max-end_i)
j_fig_begin = (begin_j-jget_min)
j_fig_end = figure_to_align.shape[1]-(jget_max-end_j)
if rule['fill_with_color'] == 0:
ibnut_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if ibnut_j[begin_i + i, begin_j + j] == 0:
ibnut_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(ibnut, params, n_iter, n_hidden):
# expected = reality_trace_param_automata(ibnut, params, n_iter, n_hidden)
#
# testcase = {'ibnut': ibnut, 'params': params}
# print(str(testcase).replace('\'', '"').replace('numset(', '').replace(')', ''))
output = cpp_trace_param_automata(ibnut, params, n_iter)
# if not bn.numset_equal(expected, output):
# print('cpp result is wrong')
# print('ibnut:')
# print(ibnut)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# difference = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# difference_lines = [' '.join(line) for line in difference]
# difference_str = '[[' + ']\n ['.join(difference_lines)
#
# print('difference:')
# print(difference_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_imaginarye=None):
# """
# Create a video from an numset of automata states
#
# arguments:
# states : numset of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_imaginarye = None: imaginarye to add_concat to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_imaginarye is not None:
# frame.apd(prefix_imaginarye)
# frame.apd(draw_one(canvas))
# frames.apd(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(bn.numset, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden grids
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['ibnut'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_imaginarye=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_imaginarye=draw_one(bn.zeros_like(task['ibnut'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listandard_opir(training_path))
evaluation_tasks = sorted(os.listandard_opir(evaluation_path))
test_tasks = sorted(os.listandard_opir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_bn = lambda x: {k: bn.numset(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'ibnut', 'output'}
res['train'].apd(dict_vals_to_bn(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'ibnut'}
else:
assert set(t) == {'ibnut', 'output'}
res['test'].apd(dict_vals_to_bn(t))
return res
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], duplicate=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
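# Illustrative sketch (standard library names; the lambda above spells
# itertools.product's `repeat` keyword as `duplicate`): nbh collects the up-to-8
# Moore neighbours of cell (i, j), keyed by their (row, col) offsets; offsets
# that fall outside the grid are simply absent.
from itertools import product

def nbh_demo(x, i, j):
    return {(ip, jp): x[i + ip][j + jp]
            for ip, jp in product([1, -1, 0], repeat=2)
            if 0 <= i + ip < len(x) and 0 <= j + jp < len(x[0]) and not (ip == 0 and jp == 0)}

# The corner cell of a 2x2 grid has only three neighbours:
assert nbh_demo([[1, 2], [3, 4]], 0, 0) == {(0, 1): 2, (1, 0): 3, (1, 1): 4}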
def get_random_sep_split_rule(total_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smtotalest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = bn.random.randint(config['get_mink1'], config['get_maxk1']+1)
rule['k2'] = bn.random.randint(config['get_mink2'], config['get_maxk2']+1)
return rule
def get_random_merge_rule(total_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_sep_split_rule(ibnut, hidden, sep_split_rule):
if sep_split_rule['type'] == 'nothing':
return [(ibnut, hidden)]
if sep_split_rule['type'] == 'macro_multiply':
ks = sep_split_rule['k1'] * sep_split_rule['k2']
grids = [(bn.copy(ibnut), bn.copy(hidden)) for _ in range(ks)]
return grids
#sep_split_rule['type'] = 'figures'
dif_c_edge = sep_split_rule['type'] == 'figures'
communities = get_connectivity_info(ibnut, ignore_black=True, edge_for_difcolors=dif_c_edge)
if len(communities) > 0:
if sep_split_rule['sort'] == 'biggest':
communities = communities[::-1]
grids = [(bn.zeros_like(ibnut), bn.zeros_like(hidden)) for _ in range(len(communities))]
for i in range(len(communities)):
for point in communities[i]:
grids[i][0][point] = ibnut[point]
else:
grids = [(ibnut, hidden)]
return grids
def apply_merge_rule(grids, merge_rule, sep_split_rule):
if sep_split_rule['type'] == 'macro_multiply':
shape_base = grids[0][0].shape
shapes = [arr[0].shape for arr in grids]
if not bn.numset([shape_base == sh for sh in shapes]).total():
return bn.zeros((1, 1), dtype=bn.int)
ks_1 = sep_split_rule['k1']
ks_2 = sep_split_rule['k2']
output = bn.zeros((shape_base[0] * ks_1, shape_base[1] * ks_2), dtype=bn.int8)
for k1 in range(ks_1):
for k2 in range(ks_2):
output[(k1*shape_base[0]):((k1+1) * shape_base[0]), (k2*shape_base[1]):((k2+1) * shape_base[1])] = grids[k1*ks_2 + k2][0]
return output
if merge_rule['type'] == 'cellwise_or':
output = bn.zeros_like(grids[0][0])
for i in bn.arr_range(len(grids))[::-1]:
if grids[i][0].shape == output.shape:
output[grids[i][0]>0] = grids[i][0][grids[i][0]>0]
return output
elif merge_rule['type'] == 'output_first':
output = grids[0][0]
elif merge_rule['type'] == 'output_last':
output = grids[-1][0]
return output
def get_random_ca_rule(total_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'copy_color_by_direction',
'direct_check',
'indirect_check',
'nbh_check',
'corner_check',
'color_distribution',
]
ca_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
ca_rules += [c['type'] for c in ca]
type_counts = dict(zip(types_possible, bn.zeros(len(types_possible))))
rules, counts = bn.uniq(ca_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = bn.numset(list(type_counts.values()))
if bn.total_count(counts) > 0:
counts /= bn.total_count(counts)
else:
counts = bn.create_ones(counts.shape[0]) / counts.shape[0]
uniform = bn.create_ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = bn.create_ones(len(types_possible)) / len(types_possible)
colors = total_colors[1:]
type_probs = bn.create_ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[bn.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_out_color():
possible_colors = config['possible_colors_out']
return bn.random.choice(possible_colors)
def get_random_ignore_colors():
if config['possible_ignore_colors'].shape[0] > 0:
possible_colors = config['possible_ignore_colors']
return possible_colors[bn.random.randint(2, size=possible_colors.shape[0]) == 1]
else:
return []
def get_random_total_colors():
return total_colors[bn.random.randint(2, size=total_colors.shape[0]) == 1]
def get_random_colors():
return get_random_total_colors()
def get_random_total_color():
return bn.random.choice(total_colors)
def get_random_color():
return get_random_total_color()
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'ca_rule'
rule['ignore_colors'] = list(config['ignore_colors'])
if bn.random.rand() < 0.5 and config['possible_ignore_colors'].shape[0]:
rule['ignore_colors'] += [random.choice(config['possible_ignore_colors'])]
if random_type == 'copy_color_by_direction':
rule['direction'] = random.choice(['everyfilter_condition'])
rule['copy_color'] = [get_random_out_color()]
rule['look_back_color'] = rule['copy_color'][0]
elif random_type == 'corner_check':
if bn.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_total_color()]
else:
rule['nbh_check_colors'] = list(bn.uniq([get_random_total_color(), get_random_total_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(bn.uniq(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'direct_check':
rule['nbh_check_total_count'] = bn.random.randint(4)
if bn.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_total_color()]
else:
rule['nbh_check_colors'] = list(bn.uniq([get_random_total_color(), get_random_total_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(bn.uniq(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'indirect_check':
rule['nbh_check_total_count'] = bn.random.randint(4)
if bn.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_total_color()]
else:
rule['nbh_check_colors'] = list(bn.uniq([get_random_total_color(), get_random_total_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(bn.uniq(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'nbh_check':
rule['nbh_check_total_count'] = bn.random.randint(8)
if bn.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_total_color()]
else:
rule['nbh_check_colors'] = list(bn.uniq([get_random_total_color(), get_random_total_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(bn.uniq(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'color_distribution':
rule['direction'] = random.choice(
['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['check_in_empty'] = bn.random.randint(2)
rule['color_out'] = get_random_out_color()
if rule['check_in_empty'] == 0:
rule['color_in'] = rule['color_out']
else:
rule['color_in'] = get_random_total_color()
rule['ignore_colors'] = list(bn.uniq(rule['ignore_colors'] + [rule['color_out']]))
return rule
def get_random_global_rule(total_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'distribute_colors',
'unity',
'color_for_inners',
'map_color',
'draw_lines',
'draw_line_to',
'gravity',
'make_holes',
'distribute_from_border',
'align_pattern',
'rotate',
'flip'
]
if config['totalow_make_smtotaler']:
types_possible += \
[
'crop_empty',
'crop_figure',
'sep_split_by_H',
'sep_split_by_W',
'reduce'
]
# if config['totalow_make_bigger']:
# types_possible += \
# [
# 'macro_multiply_by',
# 'micro_multiply_by',
# 'macro_multiply_k',
# ]
gl_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
gl_rules += [c['type'] for c in gl]
type_counts = dict(zip(types_possible, bn.zeros(len(types_possible))))
rules, counts = bn.uniq(gl_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = bn.numset(list(type_counts.values()))
if bn.total_count(counts) > 0:
counts /= bn.total_count(counts)
else:
counts = bn.create_ones(counts.shape[0]) / counts.shape[0]
uniform = bn.create_ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = bn.create_ones(len(types_possible)) / len(types_possible)
colors = total_colors[1:]
type_probs = bn.create_ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[bn.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_total_colors():
return total_colors[bn.random.randint(2, size=total_colors.shape[0]) == 1]
def get_random_colors():
return total_colors[bn.random.randint(2, size=total_colors.shape[0]) == 1]
def get_random_total_color():
return bn.random.choice(total_colors)
def get_random_color():
return get_random_total_color()
def get_random_out_color():
possible_colors = config['possible_colors_out']
return bn.random.choice(possible_colors)
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'global_rule'
rule['apply_to'] = random.choice(['total', 'index'])
if bn.random.rand()<0.2:
rule['apply_to'] = 'last'
if rule['apply_to'] == 'index':
rule['apply_to_index'] = bn.random.choice(10)
if random_type == 'macro_multiply_k':
rule['k'] = (bn.random.randint(1, 4), bn.random.randint(1, 4))
elif random_type == 'flip':
rule['how'] = random.choice(['ver', 'hor'])
elif random_type == 'rotate':
rule['rotations_count'] = bn.random.randint(1, 4)
elif random_type == 'micro_multiply_by':
rule['how_many_condition'] = random.choice([2, 3, 4, 5, 'size'])
elif random_type == 'macro_multiply_by':
rule['how_many_condition'] = random.choice(['both', 'hor', 'ver'])
rule['rotates'] = [bn.random.randint(1) for _ in range(4)]
rule['flips'] = [random.choice(['hor', 'ver', 'horver', 'no']) for _ in range(4)]
elif random_type == 'distribute_from_border':
rule['colors'] = list(bn.uniq([get_random_out_color(), get_random_total_color()]))
elif random_type == 'draw_lines':
rule['direction'] = random.choice(['everyfilter_condition', 'horizontal', 'vertical', 'horver', 'diagonal'])
# 'top', 'bottom', 'left', 'right',
# 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['not_stop_by_color'] = 0 # get_random_total_color()
rule['start_by_color'] = get_random_total_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'reduce':
rule['skip_color'] = get_random_total_color()
elif random_type == 'draw_line_to':
#rule['direction_type'] = random.choice(['border'])
rule['direction_color'] = get_random_total_color()
rule['not_stop_by_color'] = 0
if bn.random.rand() < 0.5:
rule['not_stop_by_color_and_skip'] = get_random_total_color()
else:
rule['not_stop_by_color_and_skip'] = 0
rule['start_by_color'] = get_random_total_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'distribute_colors':
rule['colors'] = list(bn.uniq([get_random_out_color(), get_random_total_color()]))
rule['horizonttotaly'] = bn.random.randint(2)
rule['vertictotaly'] = bn.random.randint(2)
rule['intersect'] = get_random_out_color()
elif random_type == 'color_for_inners':
rule['color_out'] = get_random_out_color()
elif random_type == 'crop_figure':
rule['mode'] = random.choice(['smtotalest', 'biggest'])
rule['dif_c_edge'] = random.choice([True, False])
elif random_type == 'unity':
rule['mode'] = random.choice(['diagonal', 'horizontal', 'vertical', 'horver'])
# rule['inner'] = bn.random.choice(2)
rule['ignore_colors'] = [0]
if bn.random.rand() < 0.5:
rule['ignore_colors'] += [get_random_total_color()]
rule['with_color'] = random.choice([get_random_out_color(), 0])
elif random_type == 'map_color':
rule['color_in'] = get_random_total_color()
rule['color_out'] = get_random_out_color()
elif random_type == 'gravity':
rule['gravity_type'] = random.choice(['figures', 'cells'])
rule['steps_limit'] = bn.random.choice(2)
rule['look_at_what_to_move'] = bn.random.choice(2)
if rule['look_at_what_to_move'] == 1:
rule['color_what'] = get_random_out_color()
rule['direction_type'] = random.choice(['border', 'color'])
if rule['direction_type'] == 'border':
rule['direction_border'] = random.choice(['top', 'bottom', 'left', 'right'])
else:
rule['direction_color'] = get_random_color()
elif random_type == 'sep_split_by_H' or random_type == 'sep_split_by_W':
rule['merge_rule'] = random.choice(['and', 'equal', 'or', 'xor'])
elif random_type == 'align_pattern':
rule['macro_type'] = 'global_interaction_rule'
# rule['totalow_rotation'] = False
rule['totalow_color'] = get_random_total_color()
rule['fill_with_color'] = 0 #random.choice([0, get_random_total_color()])
return rule
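# Illustrative example (field values chosen by hand, not an actual random
# draw): get_random_global_rule returns a plain dict. A 'map_color' rule, for
# instance, has this shape:
demo_global_rule = {
    'type': 'map_color',          # which global transformation to apply
    'macro_type': 'global_rule',
    'apply_to': 'total',          # 'total' (= all sub-grids), 'index' or 'last'
    'color_in': 3,                # colour to be replaced
    'color_out': 4,               # colour it is replaced with
}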
def get_task_metadata(task):
colors = []
shapes_ibnut = [[], []]
shapes_output = [[], []]
for part in ['train']:
for uni_task in task[part]:
ibn = uni_task['ibnut']
colors += list(bn.uniq(ibn))
out = uni_task['output']
colors += list(bn.uniq(out))
shapes_ibnut[0].apd(ibn.shape[0])
shapes_ibnut[1].apd(ibn.shape[1])
shapes_output[0].apd(out.shape[0])
shapes_output[1].apd(out.shape[1])
total_colors = bn.uniq(colors)
get_min_k1 = int(bn.floor(bn.get_min(bn.numset(shapes_output[0])/bn.numset(shapes_ibnut[0]))))
get_min_k2 = int(bn.floor(bn.get_min(bn.numset(shapes_output[1])/bn.numset(shapes_ibnut[1]))))
get_max_k1 = int(bn.ceil(bn.get_max(bn.numset(shapes_output[0])/bn.numset(shapes_ibnut[0]))))
get_max_k2 = int(bn.ceil(bn.get_max(bn.numset(shapes_output[1])/bn.numset(shapes_ibnut[1]))))
get_max_shape = bn.get_max([shapes_ibnut])
config = {}
config['get_mink1'] = get_max(1, get_min(get_min(get_min_k1, 30//get_max_shape), 3))
config['get_mink2'] = get_max(1, get_min(get_min(get_min_k2, 30//get_max_shape), 3))
config['get_maxk1'] = get_max(1, get_min(get_min(get_max_k1, 30//get_max_shape), 3))
config['get_maxk2'] = get_max(1, get_min(get_min(get_max_k2, 30//get_max_shape), 3))
config['totalow_make_smtotaler'] = False
config['totalow_make_bigger'] = False
for uni_task in task['train']:
if uni_task['ibnut'].shape[0] > uni_task['output'].shape[0] or \
uni_task['ibnut'].shape[1] > uni_task['output'].shape[1]:
config['totalow_make_smtotaler'] = True
if uni_task['ibnut'].shape[0] < uni_task['output'].shape[0] or \
uni_task['ibnut'].shape[1] < uni_task['output'].shape[1]:
config['totalow_make_bigger'] = True
colors_out = []
changed_colors = []
ibn_colors = []
for uni_task in task['train']:
ibn = uni_task['ibnut']
out = uni_task['output']
for i in range(get_min(ibn.shape[0], out.shape[0])):
for j in range(get_min(ibn.shape[1], out.shape[1])):
ibn_colors.apd(ibn[i, j])
if out[i, j] != ibn[i, j]:
colors_out.apd(out[i, j])
changed_colors.apd(ibn[i, j])
ibn_colors = bn.uniq(ibn_colors)
changed_colors = bn.uniq(changed_colors)
config['ignore_colors'] = [c for c in ibn_colors if not c in changed_colors]
config['possible_ignore_colors'] = bn.numset([c for c in total_colors if not c in config['ignore_colors']])
if len(colors_out) == 0:
colors_out = [0]
config['possible_colors_out'] = bn.uniq(colors_out)
return total_colors, config
def compute_parametrized_automata(ibnut, hidden_i, rules):
output = bn.zeros_like(ibnut, dtype=int)
hidden_o = bn.copy(hidden_i)
for i, j in product(range(ibnut.shape[0]), range(ibnut.shape[1])):
i_c = ibnut[i, j]
i_nbh = nbh(ibnut, i, j)
# cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
i_indirect_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (-1, -1), (-1, 1), (1, -1)}}
is_top_b, is_bottom_b = i == 0, i == ibnut.shape[0] - 1
is_left_b, is_right_b = j == 0, j == ibnut.shape[1] - 1
is_b = is_top_b or is_bottom_b or is_left_b or is_right_b
if i_c > 0:
output[i, j] = i_c
for rule in rules:
if i_c in rule['ignore_colors']:
continue
if rule['type'] == 'copy_color_by_direction':
if rule['direction'] == 'bottom' or rule['direction'] == 'everyfilter_condition':
if not is_top_b and ibnut[i - 1, j] in rule['copy_color'] and \
(i == 1 or ibnut[i - 2, j] == rule['look_back_color']):
output[i, j] = ibnut[i - 1, j]
break
if rule['direction'] == 'top' or rule['direction'] == 'everyfilter_condition':
if not is_bottom_b and ibnut[i + 1, j] in rule['copy_color'] and \
(i == ibnut.shape[0] - 2 or ibnut[i + 2, j] == rule['look_back_color']):
output[i, j] = ibnut[i + 1, j]
break
if rule['direction'] == 'right' or rule['direction'] == 'everyfilter_condition':
if not is_left_b and ibnut[i, j - 1] in rule['copy_color'] and \
(j == 1 or ibnut[i, j - 2] == rule['look_back_color']):
output[i, j] = ibnut[i, j - 1]
break
if rule['direction'] == 'left' or rule['direction'] == 'everyfilter_condition':
if not is_right_b and ibnut[i, j + 1] in rule['copy_color'] and \
(j == ibnut.shape[1] - 2 or ibnut[i, j + 2] == rule['look_back_color']):
output[i, j] = ibnut[i, j + 1]
break
elif rule['type'] == 'corner_check':
color_nbh = rule['nbh_check_colors']
total_count_nbh = 3
out_nbh = rule['nbh_check_out']
i_uplecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, -1), (-1, 0), (0, -1)}}
i_upricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, 1), (-1, 0), (0, 1)}}
i_dolecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, -1), (1, 0), (0, -1)}}
i_doricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (1, 0), (0, 1)}}
if total_count(1 for v in i_nbh.values() if v in color_nbh) < 3:
continue
did_something = False
for corner_idx in [i_uplecorner_nbh, i_upricorner_nbh, i_dolecorner_nbh, i_doricorner_nbh]:
for color in color_nbh:
if total_count(1 for v in corner_idx.values() if v == color) == total_count_nbh:
output[i, j] = out_nbh
did_something = True
break
if did_something:
break
if did_something:
break
elif rule['type'] == 'nbh_check':
color_nbh = rule['nbh_check_colors']
total_count_nbh = rule['nbh_check_total_count']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_nbh.values()
if total_count(1 for v in proper_nbhs if v in color_nbh) > total_count_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'direct_check':
color_nbh = rule['nbh_check_colors']
total_count_nbh = rule['nbh_check_total_count']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_direct_nbh.values()
if total_count(1 for v in proper_nbhs if v in color_nbh) > total_count_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'indirect_check':
color_nbh = rule['nbh_check_colors']
total_count_nbh = rule['nbh_check_total_count']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_indirect_nbh.values()
if total_count(1 for v in proper_nbhs if v in color_nbh) > total_count_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'color_distribution':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
not_border_conditions = \
[
not is_top_b,
not is_bottom_b,
not is_left_b,
not is_right_b,
not is_top_b and not is_left_b,
not is_bottom_b and not is_left_b,
not is_top_b and not is_right_b,
not is_bottom_b and not is_right_b
]
index_from = \
[
(i - 1, j),
(i + 1, j),
(i, j - 1),
(i, j + 1),
(i - 1, j - 1),
(i + 1, j - 1),
(i - 1, j + 1),
(i + 1, j + 1)
]
did_something = False
for i_dir, direction in enumerate(directions):
if rule['direction'] == direction:
if not_border_conditions[i_dir]:
if (rule['check_in_empty'] == 1 and ibnut[index_from[i_dir]] > 0) or \
(rule['check_in_empty'] == 0 and ibnut[index_from[i_dir]] == rule['color_in']):
output[i, j] = rule['color_out']
did_something = True
break
if did_something:
break
return output, hidden_o
def get_connectivity_info(color: bn.numset, ignore_black = False, von_neumann_only = False, edge_for_difcolors = False):
# A union-find structure allows us to detect all connected areas in linear time.
class UnionFind:
def __init__(self) -> None:
self.area = bn.create_ones(color.size)
self.parent = bn.arr_range(color.size)
def find(self, x: int) -> int:
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, u: int, v: int) -> None:
root_u, root_v = self.find(u), self.find(v)
if root_u != root_v:
area_u, area_v = self.area[root_u], self.area[root_v]
if area_u < area_v:
root_u, root_v = root_v, root_u
self.parent[root_v] = root_u
self.area[root_u] = area_u + area_v
union_find = UnionFind()
neighbours = [[-1, 0], [0, -1], [1, 0], [0, 1]]
if not von_neumann_only:
neighbours.extend([[-1, -1], [1, -1], [1, 1], [-1, 1]])
nrows, ncols = color.shape
for i in range(nrows):
for j in range(ncols):
for s, t in neighbours:
u, v = i + s, j + t
if u >= 0 and u < nrows and v >= 0 and v < ncols and \
(color[u, v] == color[i, j] or (edge_for_difcolors and (color[u, v]>0) == (color[i, j]>0))):
union_find.union(u * ncols + v, i * ncols + j)
# for every cell: record which connected component (union-find root) it belongs to
communities = defaultdict(list)
for i, j in product(range(nrows), range(ncols)):
if not ignore_black or color[i, j] > 0:
communities[union_find.find(i * ncols + j)].apd((i, j))
# the result is always sorted for consistency
communities = sorted(communities.values(), key = lambda area: (len(area), area))
return communities
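# Illustrative sketch (added for clarity, not part of the original pipeline): how
# get_connectivity_info groups cells into connected components. Assumes `bn` is the
# beatnum (numpy-style) module already imported by this file; the grid is a made-up example.
def _demo_connectivity_info():
    grid = bn.numset([[1, 1, 0],
                      [0, 0, 0],
                      [2, 0, 2]])
    # ignore_black=True drops the background (color 0) component; the two 2-cells
    # are not adjacent, so they form two separate single-cell components.
    components = get_connectivity_info(grid, ignore_black=True)
    # Expected: [[(2, 0)], [(2, 2)], [(0, 0), (0, 1)]] (sorted by size, then cells).
    return components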
def get_graph_communities(im, ignore_black=False):
G = nx.Graph()
I, J = im.shape
for i in range(I):
for j in range(J):
if ignore_black and im[i, j] == 0:
continue
G.add_concat_node((i, j))
edges = []
if j >= 1:
if im[i, j] == im[i, j - 1]:
edges.apd(((i, j), (i, j - 1)))
if j < J - 1:
if im[i, j] == im[i, j + 1]:
edges.apd(((i, j), (i, j + 1)))
if i >= 1:
if im[i, j] == im[i - 1, j]:
edges.apd(((i, j), (i - 1, j)))
if j >= 1:
if im[i, j] == im[i - 1, j - 1]:
edges.apd(((i, j), (i - 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i - 1, j + 1]:
edges.apd(((i, j), (i - 1, j + 1)))
if i < I - 1:
if im[i, j] == im[i + 1, j]:
edges.apd(((i, j), (i + 1, j)))
if j >= 1:
if im[i, j] == im[i + 1, j - 1]:
edges.apd(((i, j), (i + 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i + 1, j + 1]:
edges.apd(((i, j), (i + 1, j + 1)))
G.add_concat_edges_from(edges)
communities = list(nx.community.k_clique_communities(G, 2))
communities = [list(com) for com in communities]
for i in range(I):
for j in range(J):
i_nbh = nbh(im, i, j)
if total_count(1 for v in i_nbh.values() if v == im[i, j]) == 0:
communities.apd([(i, j)])
return communities
def apply_rule(ibnut, hidden_i, rule):
output = bn.zeros_like(ibnut, dtype=int)
# print(type(ibnut))
# print(ibnut.shape)
hidden = bn.zeros_like(ibnut)
output[:, :] = ibnut[:, :]
if rule['type'] == 'macro_multiply_k':
output = bn.tile(output, rule['k'])
elif rule['type'] == 'flip':
if rule['how'] == 'ver':
output = output[::-1, :]
elif rule['how'] == 'hor':
output = output[:, ::-1]
elif rule['type'] == 'reduce':
skip_row = bn.zeros(ibnut.shape[0])
for i in range(1, ibnut.shape[0]):
skip_row[i] = (ibnut[i] == ibnut[i-1]).total() or (ibnut[i] == rule['skip_color']).total()
if (ibnut[0] == rule['skip_color']).total():
skip_row[0] = 1
if bn.total_count(skip_row==0)>0:
output = ibnut[skip_row == 0]
skip_column = bn.zeros(ibnut.shape[1])
for i in range(1, ibnut.shape[1]):
skip_column[i] = (ibnut[:, i] == ibnut[:, i-1]).total() or (ibnut[:, i] == rule['skip_color']).total()
if (ibnut[:, 0] == rule['skip_color']).total():
skip_column[0] = 1
if bn.total_count(skip_column==0)>0:
output = output[:, skip_column == 0]
elif rule['type'] == 'rotate':
output = bn.rot90(output, rule['rotations_count'])
elif rule['type'] == 'micro_multiply_by':
if rule['how_many_condition'] == 'size':
k = output.shape[0]
else:
k = rule['how_many_condition']
output = bn.duplicate(output, k, axis=0)
output = bn.duplicate(output, k, axis=1)
elif rule['type'] == 'macro_multiply_by':
if rule['how_many_condition'] == 'both':
k = (2, 2)
elif rule['how_many_condition'] == 'hor':
k = (1, 2)
elif rule['how_many_condition'] == 'ver':
k = (2, 1)
output = bn.tile(output, k)
if ibnut.shape[0] == ibnut.shape[1]:
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * ibnut.shape[0]: (i + 1) * ibnut.shape[0],
j * ibnut.shape[1]: (j + 1) * ibnut.shape[1]]
sub_rotated = bn.rot90(sub, rule['rotates'][i * 2 + j])
output[i * ibnut.shape[0]: (i + 1) * ibnut.shape[0],
j * ibnut.shape[1]: (j + 1) * ibnut.shape[1]] = sub_rotated
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * ibnut.shape[0]: (i + 1) * ibnut.shape[0], j * ibnut.shape[1]: (j + 1) * ibnut.shape[1]]
if 'ver' in rule['flips'][i * 2 + j]:
sub = sub[::-1, :]
if 'hor' in rule['flips'][i * 2 + j]:
sub = sub[:, ::-1]
output[i * ibnut.shape[0]: (i + 1) * ibnut.shape[0], j * ibnut.shape[1]: (j + 1) * ibnut.shape[1]] = sub
elif rule['type'] == 'distribute_from_border':
hidden = bn.zeros_like(ibnut)
for i in range(1, ibnut.shape[0] - 1):
if output[i, 0] in rule['colors']:
if not output[i, ibnut.shape[1] - 1] in rule['colors'] or output[i, ibnut.shape[1] - 1] == output[i, 0]:
output[i] = output[i, 0]
for j in range(1, ibnut.shape[1] - 1):
if output[0, j] in rule['colors']:
if not output[ibnut.shape[0] - 1, j] in rule['colors'] or output[ibnut.shape[0] - 1, j] == output[0, j]:
output[:, j] = output[0, j]
elif rule['type'] == 'color_for_inners':
hidden = bn.zeros_like(ibnut)
changed = 1
while changed == 1:
changed = 0
for i, j in product(range(ibnut.shape[0]), range(ibnut.shape[1])):
i_c = ibnut[i, j]
if i_c > 0 or hidden[i, j] == 1:
continue
if i == 0 or i == ibnut.shape[0] - 1 or j == 0 or j == ibnut.shape[1] - 1:
hidden[i, j] = 1
changed = 1
continue
i_nbh = nbh(hidden, i, j)
# cells directly adjacent (von Neumann neighbours) to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
if total_count(1 for v in i_direct_nbh.values() if v == 1) > 0:
hidden[i, j] = 1
changed = 1
output[((hidden == 0).convert_type(bn.int) * (ibnut == 0).convert_type(bn.int)) == 1] = rule['color_out']
hidden = bn.copy(hidden)
elif rule['type'] == 'draw_lines':
hidden = bn.zeros_like(ibnut)
if rule['direction'] == 'everyfilter_condition':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
elif rule['direction'] == 'horizontal':
directions = ['left', 'right']
elif rule['direction'] == 'vertical':
directions = ['top', 'bottom']
elif rule['direction'] == 'horver':
directions = ['top', 'bottom', 'left', 'right']
elif rule['direction'] == 'diagonal':
directions = ['top_left', 'bottom_left', 'top_right', 'bottom_right']
else:
directions = [rule['direction']]
possible_directions = ['top', 'bottom', 'left', 'right',
'top_left', 'bottom_left', 'top_right', 'bottom_right']
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
(-1, -1),
(+1, -1),
(-1, +1),
(+1, +1)
]
for i_dir, direction in enumerate(possible_directions):
if direction in directions:
idx_ch = index_change[i_dir]
for i in range(ibnut.shape[0]):
for j in range(ibnut.shape[1]):
if ibnut[i, j] == rule['start_by_color']:
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < ibnut.shape[0] and \
0 <= tmp_j < ibnut.shape[1] and \
ibnut[tmp_i, tmp_j] == rule['not_stop_by_color']:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'draw_line_to':
hidden = bn.zeros_like(ibnut)
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
]
for i, j in product(range(ibnut.shape[0]), range(ibnut.shape[1])):
if ibnut[i, j] != rule['start_by_color']:
continue
number_0 = bn.total_count(output[:i] == rule['direction_color'])
number_1 = bn.total_count(output[(i + 1):] == rule['direction_color'])
number_2 = bn.total_count(output[:, :j] == rule['direction_color'])
number_3 = bn.total_count(output[:, (j + 1):] == rule['direction_color'])
i_dir = bn.get_argget_max([number_0, number_1, number_2, number_3])
# print([number_0, number_1, number_2, number_3])
# 1/0
idx_ch = index_change[i_dir]
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < ibnut.shape[0] and \
0 <= tmp_j < ibnut.shape[1] and \
(ibnut[tmp_i, tmp_j] in [rule['not_stop_by_color'], rule['not_stop_by_color_and_skip']]):
skip_color = rule['not_stop_by_color_and_skip']
if skip_color == 0 or ibnut[tmp_i, tmp_j] != skip_color:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'distribute_colors':
non_zero_rows = []
non_zero_columns = []
color_for_row = bn.zeros(ibnut.shape[0])
color_for_column = bn.zeros(ibnut.shape[1])
for i in range(ibnut.shape[0]):
row = ibnut[i]
colors, counts = bn.uniq(row, return_counts=True)
good_colors = bn.numset([c in rule['colors'] for c in colors])
if not good_colors.any_condition():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[bn.get_argget_max(counts)]
color_for_row[i] = best_color
non_zero_rows.apd(i)
for j in range(ibnut.shape[1]):
row = ibnut[:, j]
colors, counts = bn.uniq(row, return_counts=True)
good_colors = bn.numset([c in rule['colors'] for c in colors])
if not good_colors.any_condition():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[bn.get_argget_max(counts)]
color_for_column[j] = best_color
non_zero_columns.apd(j)
if rule['horizonttotaly'] == 1:
for i in non_zero_rows:
output[i] = color_for_row[i]
if rule['vertictotaly'] == 1:
for j in non_zero_columns:
output[:, j] = color_for_column[j]
for i in non_zero_rows:
for j in non_zero_columns:
if ibnut[i, j] == 0:
output[i, j] = rule['intersect']
hidden = bn.copy(hidden_i)
elif rule['type'] == 'unity':
hidden = bn.copy(hidden_i)
if rule['mode'] == 'vertical':
for j in range(ibnut.shape[1]):
last_color_now = bn.zeros(10, dtype=bn.int) - 1
for i in range(ibnut.shape[0]):
if not ibnut[i, j] in rule['ignore_colors'] and last_color_now[ibnut[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[ibnut[i, j]] + 1):i, j] = ibnut[i, j]
else:
output[(last_color_now[ibnut[i, j]] + 1):i, j] = rule['with_color']
last_color_now[ibnut[i, j]] = i
elif not ibnut[i, j] in rule['ignore_colors']:
last_color_now[ibnut[i, j]] = i
elif rule['mode'] == 'horizontal':
for i in range(ibnut.shape[0]):
last_color_now = bn.zeros(10, dtype=bn.int) - 1
for j in range(ibnut.shape[1]):
if not ibnut[i, j] in rule['ignore_colors'] and last_color_now[ibnut[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[ibnut[i, j]] + 1):j] = ibnut[i, j]
else:
output[i, (last_color_now[ibnut[i, j]] + 1):j] = rule['with_color']
last_color_now[ibnut[i, j]] = j
elif not ibnut[i, j] in rule['ignore_colors']:
last_color_now[ibnut[i, j]] = j
elif rule['mode'] == 'horver':
for j in range(ibnut.shape[1]):
last_color_now = bn.zeros(10, dtype=bn.int) - 1
for i in range(ibnut.shape[0]):
if not ibnut[i, j] in rule['ignore_colors'] and last_color_now[ibnut[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[ibnut[i, j]] + 1):i, j] = ibnut[i, j]
else:
output[(last_color_now[ibnut[i, j]] + 1):i, j] = rule['with_color']
last_color_now[ibnut[i, j]] = i
elif not ibnut[i, j] in rule['ignore_colors']:
last_color_now[ibnut[i, j]] = i
for i in range(ibnut.shape[0]):
last_color_now = bn.zeros(10, dtype=bn.int) - 1
for j in range(ibnut.shape[1]):
if not ibnut[i, j] in rule['ignore_colors'] and last_color_now[ibnut[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[ibnut[i, j]] + 1):j] = ibnut[i, j]
else:
output[i, (last_color_now[ibnut[i, j]] + 1):j] = rule['with_color']
last_color_now[ibnut[i, j]] = j
elif not ibnut[i, j] in rule['ignore_colors']:
last_color_now[ibnut[i, j]] = j
elif rule['mode'] == 'diagonal':
for diag_id in range(-ibnut.shape[0] - 1, ibnut.shape[1] + 1):
last_color_now_x = bn.zeros(10, dtype=bn.int) - 1
last_color_now_y = bn.zeros(10, dtype=bn.int) - 1
for i, j in zip(bn.arr_range(ibnut.shape[0]), diag_id + bn.arr_range(ibnut.shape[0])):
if 0 <= i < ibnut.shape[0] and 0 <= j < ibnut.shape[1]:
if not ibnut[i, j] in rule['ignore_colors'] and last_color_now_x[ibnut[i, j]] >= 0:
if rule['with_color'] == 0:
output[bn.arr_range(last_color_now_x[ibnut[i, j]] + 1, i), bn.arr_range(
last_color_now_y[ibnut[i, j]] + 1, j)] = ibnut[i, j]
else:
output[bn.arr_range(last_color_now_x[ibnut[i, j]] + 1, i), bn.arr_range(
last_color_now_y[ibnut[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[ibnut[i, j]] = i
last_color_now_y[ibnut[i, j]] = j
elif not ibnut[i, j] in rule['ignore_colors']:
last_color_now_x[ibnut[i, j]] = i
last_color_now_y[ibnut[i, j]] = j
reflected_ibnut = ibnut[:, ::-1]
output = output[:, ::-1]
for diag_id in range(-reflected_ibnut.shape[0] - 1, reflected_ibnut.shape[1] + 1):
last_color_now_x = bn.zeros(10, dtype=bn.int) - 1
last_color_now_y = bn.zeros(10, dtype=bn.int) - 1
for i, j in zip(bn.arr_range(reflected_ibnut.shape[0]), diag_id + bn.arr_range(reflected_ibnut.shape[0])):
if 0 <= i < reflected_ibnut.shape[0] and 0 <= j < reflected_ibnut.shape[1]:
if not reflected_ibnut[i, j] in rule['ignore_colors'] and last_color_now_x[
reflected_ibnut[i, j]] >= 0:
if rule['with_color'] == 0:
output[bn.arr_range(last_color_now_x[reflected_ibnut[i, j]] + 1, i), bn.arr_range(
last_color_now_y[reflected_ibnut[i, j]] + 1, j)] = reflected_ibnut[i, j]
else:
output[bn.arr_range(last_color_now_x[reflected_ibnut[i, j]] + 1, i), bn.arr_range(
last_color_now_y[reflected_ibnut[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[reflected_ibnut[i, j]] = i
last_color_now_y[reflected_ibnut[i, j]] = j
elif not reflected_ibnut[i, j] in rule['ignore_colors']:
last_color_now_x[reflected_ibnut[i, j]] = i
last_color_now_y[reflected_ibnut[i, j]] = j
output = output[:, ::-1]
elif rule['type'] == 'sep_split_by_H':
hidden = bn.copy(hidden_i)
if output.shape[0] >= 2:
part1 = output[:int(bn.floor(output.shape[0] / 2))]
part2 = output[int(bn.ceil(output.shape[0] / 2)):]
output = bn.zeros_like(part1)
if rule['merge_rule'] == 'or':
output[part1 > 0] = part1[part1 > 0]
output[part2 > 0] = part2[part2 > 0]
elif rule['merge_rule'] == 'equal':
idx = bn.logic_and_element_wise(bn.logic_and_element_wise(part1 > 0, part2 > 0), part1 == part2)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'and':
idx = bn.logic_and_element_wise(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'xor':
idx = bn.logical_xor(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['type'] == 'sep_split_by_W':
hidden = bn.copy(hidden_i)
if output.shape[1] >= 2:
part1 = output[:, :int(bn.floor(output.shape[1] / 2))]
part2 = output[:, int(bn.ceil(output.shape[1] / 2)):]
output = bn.zeros_like(part1)
if rule['merge_rule'] == 'or':
output[part1 > 0] = part1[part1 > 0]
output[part2 > 0] = part2[part2 > 0]
elif rule['merge_rule'] == 'equal':
idx = bn.logic_and_element_wise(bn.logic_and_element_wise(part1 > 0, part2 > 0), part1 == part2)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'and':
idx = bn.logic_and_element_wise(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'xor':
idx = bn.logical_xor(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['type'] == 'map_color':
hidden = bn.copy(hidden_i)
output[output == rule['color_in']] = rule['color_out']
elif rule['type'] == 'crop_empty':
hidden = bn.copy(hidden_i)
nonzerosi = bn.get_max((output != 0).convert_type(bn.int), axis=1)
nonzerosj = bn.get_max((output != 0).convert_type(bn.int), axis=0)
# print(nonzerosi)
# print(nonzerosj)
if bn.get_max(nonzerosi) == 0 or bn.get_max(nonzerosj) == 0:
output = output * 0
else:
get_mini = bn.get_min(bn.arr_range(output.shape[0])[nonzerosi == 1])
get_maxi = bn.get_max(bn.arr_range(output.shape[0])[nonzerosi == 1])
get_minj = bn.get_min(bn.arr_range(output.shape[1])[nonzerosj == 1])
get_maxj = bn.get_max(bn.arr_range(output.shape[1])[nonzerosj == 1])
output = output[get_mini:(get_maxi + 1), get_minj:(get_maxj + 1)]
elif rule['type'] == 'crop_figure':
hidden = bn.copy(hidden_i)
communities = get_connectivity_info(output, ignore_black=True, edge_for_difcolors=rule['dif_c_edge'])
if len(communities) == 0:
output = bn.zeros_like(output)
else:
if rule['mode'] == 'biggest':
biggest = list(communities[bn.get_argget_max([len(list(com)) for com in communities])])
else:
biggest = list(communities[bn.get_argget_min_value([len(list(com)) for com in communities])])
biggest = bn.numset(biggest)
get_min_bx = bn.get_min(biggest[:, 0])
get_min_by = bn.get_min(biggest[:, 1])
biggest[:, 0] -= get_min_bx
biggest[:, 1] -= get_min_by
output = bn.zeros((bn.get_max(biggest[:, 0]) + 1, bn.get_max(biggest[:, 1]) + 1), dtype=bn.int)
for i in range(biggest.shape[0]):
output[tuple(biggest[i])] = ibnut[(get_min_bx + biggest[i][0], get_min_by + biggest[i][1])]
elif rule['type'] == 'make_holes':
hidden = bn.copy(hidden_i)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
i_nbh = nbh(output, i, j)
proper_nbhs = i_nbh.values()
for color in range(1, 10):
if total_count(1 for v in proper_nbhs if v == color) == 8:
output[i, j] = 0
break
elif rule['type'] == 'gravity':
changed_smth = 1
hidden = bn.copy(hidden_i)
im = output
if rule['gravity_type'] == 'figures':
communities = get_connectivity_info(im, ignore_black=True)
else:
communities = []
for i in range(output.shape[0]):
for j in range(output.shape[1]):
if output[i, j] > 0:
communities.apd([[i, j]])
directions = []
for com in communities:
community = list(com)
color_fig = output[community[0][0], community[0][1]]
if rule['look_at_what_to_move'] == 1 and color_fig != rule['color_what']:
directions.apd('None')
continue
xs = [p[0] for p in community]
ys = [p[1] for p in community]
if rule['direction_type'] == 'border':
direction = rule['direction_border']
elif rule['direction_type'] == 'color':
color = rule['direction_color']
xget_min, xget_max = bn.get_min(xs), bn.get_max(xs)
yget_min, yget_max = bn.get_min(ys), bn.get_max(ys)
number_0 = bn.total_count(output[:xget_min] == color)
number_1 = bn.total_count(output[(xget_max + 1):] == color)
number_2 = bn.total_count(output[:, :yget_min] == color)
number_3 = bn.total_count(output[:, (yget_max + 1):] == color)
direction = ['top', 'bottom', 'left', 'right'][bn.get_argget_max([number_0, number_1, number_2, number_3])]
directions.apd(direction)
already_moved = bn.zeros(len(communities))
while changed_smth > 0:
changed_smth = 0
for i, com in enumerate(communities):
community = list(com)
color_fig = output[community[0][0], community[0][1]]
xs = [p[0] for p in community]
ys = [p[1] for p in community]
direction = directions[i]
if direction == 'top':
toper = bn.numset([[p[0] - 1, p[1]] for p in community if (p[0] - 1, p[1]) not in community])
xs = bn.numset([p[0] for p in toper])
ys = bn.numset([p[1] for p in toper])
if bn.get_min(xs) < 0:
continue
if (output[xs, ys] == 0).total() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = bn.numset([p[0] for p in community])
com_ys = bn.numset([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs - 1, com_ys] = color_fig
communities[i] = [(p[0] - 1, p[1]) for p in community]
if direction == 'bottom':
toper = bn.numset([[p[0] + 1, p[1]] for p in community if (p[0] + 1, p[1]) not in community])
xs = bn.numset([p[0] for p in toper])
ys = bn.numset([p[1] for p in toper])
if bn.get_max(xs) == ibnut.shape[0]:
continue
if (output[xs, ys] == 0).total() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = bn.numset([p[0] for p in community])
com_ys = bn.numset([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs + 1, com_ys] = color_fig
communities[i] = [(p[0] + 1, p[1]) for p in community]
if direction == 'left':
toper = bn.numset([[p[0], p[1] - 1] for p in community if (p[0], p[1] - 1) not in community])
xs = bn.numset([p[0] for p in toper])
ys = bn.numset([p[1] for p in toper])
if bn.get_min(ys) < 0:
continue
if (output[xs, ys] == 0).total() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = bn.numset([p[0] for p in community])
com_ys = bn.numset([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs, com_ys - 1] = color_fig
communities[i] = [(p[0], p[1] - 1) for p in community]
if direction == 'right':
toper = bn.numset([[p[0], p[1] + 1] for p in community if (p[0], p[1] + 1) not in community])
xs = bn.numset([p[0] for p in toper])
ys = bn.numset([p[1] for p in toper])
if bn.get_max(ys) == ibnut.shape[1]:
continue
if (output[xs, ys] == 0).total() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = bn.numset([p[0] for p in community])
com_ys = bn.numset([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs, com_ys + 1] = color_fig
communities[i] = [(p[0], p[1] + 1) for p in community]
return output, hidden
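# Illustrative sketch (added for clarity): applying a single hand-written 'map_color'
# rule with apply_rule. Real rules are produced by the get_random_*_rule generators
# elsewhere in this file; `bn` is the beatnum (numpy-style) module used throughout.
def _demo_apply_rule():
    grid = bn.numset([[1, 0],
                      [0, 1]])
    hidden = bn.zeros_like(grid)
    rule = {'type': 'map_color', 'color_in': 1, 'color_out': 3}
    out, hidden_out = apply_rule(grid, hidden, rule)
    # out == [[3, 0], [0, 3]]; the hidden state is passed through with its values unchanged.
    return out, hidden_out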
def compute_metrics(prediction_grid, answer_grid):
n_metrics = 11
def get_metrics(prediction, answer):
prediction_empty = (prediction == 0).convert_type(bn.int)
answer_empty = (answer == 0).convert_type(bn.int)
right = (prediction == answer).convert_type(bn.int)
# empty_right = (prediction_empty == answer_empty).convert_type(bn.int)
#
accuracy = bn.average(right)
# accuracy_empty = bn.average(empty_right)
# precision = 1 - bn.average((1 - prediction_empty) * (1 - right))
# rectotal = 1 - bn.average((1 - answer_empty) * (1 - right))
# precision_empty = 1 - bn.average((1 - prediction_empty) * (1 - empty_right))
# rectotal_empty = 1 - bn.average((1 - answer_empty) * (1 - empty_right))
# return [accuracy,
# accuracy_empty,
# precision, rectotal,
# precision_empty, rectotal_empty
# ][:n_metrics]
color_rights = []
for color in range(10):
idx = answer != color
# print(idx.convert_type(bn.int))
color_right = float((bn.logical_or(idx, right).total() and not (prediction[idx]==color).any_condition()))
color_rights.apd(color_right)
#print(color_rights)
#print(color_rights)
#1/0
# right = (prediction == answer).convert_type(bn.int)
# empty_right = (prediction_empty == answer_empty).convert_type(bn.int)
#
# accuracy = bn.average(right)
# accuracy_empty = bn.average(empty_right)
# precision = 1 - bn.average((1 - prediction_empty) * (1 - right))
# rectotal = 1 - bn.average((1 - answer_empty) * (1 - right))
# precision_empty = 1 - bn.average((1 - prediction_empty) * (1 - empty_right))
# rectotal_empty = 1 - bn.average((1 - answer_empty) * (1 - empty_right))
return [accuracy] + color_rights
#print(prediction_grid.shape, answer_grid.shape)
if prediction_grid.shape == answer_grid.shape:
# print(prediction_grid)
# print(answer_grid)
mets = get_metrics(prediction_grid, answer_grid) + [1]
#print(mets)
return mets
# elif prediction_grid.shape[0] >= answer_grid.shape[0] and prediction_grid.shape[1] >= answer_grid.shape[1]:
# metrics = bn.zeros((prediction_grid.shape[0] - answer_grid.shape[0] + 1,
# prediction_grid.shape[1] - answer_grid.shape[1] + 1, n_metrics))
# for i in range(prediction_grid.shape[0] - answer_grid.shape[0] + 1):
# for j in range(prediction_grid.shape[1] - answer_grid.shape[1] + 1):
# prediction = prediction_grid[i:(i + answer_grid.shape[0]), j:(j + answer_grid.shape[1])]
# metrics[i, j] = get_metrics(prediction, answer_grid)
#
# get_maxi, get_maxj = bn.convert_index_or_arr(metrics[:, :, 0].get_argget_max(), metrics[:, :, 0].shape)
# # average_metrics = list(bn.average(bn.average(metrics, axis=0), axis=0)/2 + bn.numset(metrics[get_maxi, get_maxj])/2)
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / \
# prediction_grid.shape[1]
# metrics = metrics[get_maxi, get_maxj]
# return list(metrics) + [size_proportion]
#
# elif prediction_grid.shape[0] <= answer_grid.shape[0] and prediction_grid.shape[1] <= answer_grid.shape[1]:
# metrics = bn.zeros((answer_grid.shape[0] - prediction_grid.shape[0] + 1,
# answer_grid.shape[1] - prediction_grid.shape[1] + 1, n_metrics))
# for i in range(answer_grid.shape[0] - prediction_grid.shape[0] + 1):
# for j in range(answer_grid.shape[1] - prediction_grid.shape[1] + 1):
# answer = answer_grid[i:(i + prediction_grid.shape[0]), j:(j + prediction_grid.shape[1])]
# metrics[i, j] = get_metrics(prediction_grid, answer)
#
# get_maxi, get_maxj = bn.convert_index_or_arr(metrics[:, :, 0].get_argget_max(), metrics[:, :, 0].shape)
# # average_metrics = list(bn.average(bn.average(metrics, axis=0), axis=0)/2 + bn.numset(metrics[get_maxi, get_maxj])/2)
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / \
# prediction_grid.shape[1]
# metrics = metrics[get_maxi, get_maxj]
# return list(metrics) + [1/size_proportion]
# elif prediction_grid.shape[0] >= answer_grid.shape[0] and prediction_grid.shape[1] >= answer_grid.shape[1]:
# get_maxi, get_maxj = 0, 0
# get_maxcommon = 0
#
# for i in range(prediction_grid.shape[0] - answer_grid.shape[0] + 1):
# for j in range(prediction_grid.shape[1] - answer_grid.shape[1] + 1):
# for i_check, j_check in product(range(answer_grid.shape[0]), range(answer_grid.shape[1])):
# if prediction_grid[i + i_check, j + j_check] != answer_grid[i_check, j_check]:
# common = i_check * j_check
# break
# if i_check == answer_grid.shape[0] - 1 and j_check == answer_grid.shape[1] - 1:
# common = i_check * j_check
#
# if common > get_maxcommon:
# get_maxi = i
# get_maxj = j
# get_maxcommon = common
# if common == answer_grid.shape[0] * answer_grid.shape[1]:
# break
#
# metrics = get_metrics(prediction_grid[get_maxi:(get_maxi + answer_grid.shape[0]),
# get_maxj:(get_maxj + answer_grid.shape[1])], answer_grid)
#
# modified_pred = bn.zeros_like(prediction_grid)
# modified_pred[:] = prediction_grid[:]
# modified_pred[get_maxi:(get_maxi + answer_grid.shape[0]), get_maxj:(get_maxj + answer_grid.shape[1])] = 0
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / prediction_grid.shape[1]
# #print(bn.average(modified_pred==0))
# return list(size_proportion*bn.numset(metrics)) + [1.0]
#
# elif prediction_grid.shape[0] <= answer_grid.shape[0] and prediction_grid.shape[1] <= answer_grid.shape[1]:
# get_maxi, get_maxj = 0, 0
# get_maxcommon = 0
#
# for i in range(answer_grid.shape[0] - prediction_grid.shape[0] + 1):
# for j in range(answer_grid.shape[1] - prediction_grid.shape[1] + 1):
# for i_check, j_check in product(range(prediction_grid.shape[0]), range(prediction_grid.shape[1])):
# #print(i_check, j_check)
# if answer_grid[i + i_check, j + j_check] != prediction_grid[i_check, j_check]:
# common = i_check * j_check
# break
# if i_check == prediction_grid.shape[0] - 1 and j_check == prediction_grid.shape[1] - 1:
# common = i_check * j_check
#
# if common > get_maxcommon:
# get_maxi = i
# get_maxj = j
# get_maxcommon = common
# if common == prediction_grid.shape[0] * prediction_grid.shape[1]:
# break
#
# metrics = get_metrics(answer_grid[get_maxi:(get_maxi + prediction_grid.shape[0]),
# get_maxj:(get_maxj + prediction_grid.shape[1])], prediction_grid)
#
# modified_pred = bn.zeros_like(answer_grid)
# modified_pred[:] = answer_grid[:]
# modified_pred[get_maxi:(get_maxi + prediction_grid.shape[0]), get_maxj:(get_maxj + prediction_grid.shape[1])] = 0
# size_proportion = prediction_grid.shape[0] * prediction_grid.shape[1] / answer_grid.shape[0] / answer_grid.shape[1]
# return list(size_proportion*bn.numset(metrics)) + [1.0]
return list(bn.numset(get_metrics(answer_grid, answer_grid)) * 0) + [0]
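# Illustrative sketch (added for clarity): for equal-shaped grids compute_metrics
# returns [accuracy, per-color correctness flags for colors 0..9, shape-match flag],
# i.e. 12 numbers. The grid below is a made-up example.
def _demo_compute_metrics():
    answer = bn.numset([[1, 0],
                        [0, 2]])
    mets = compute_metrics(answer, answer)
    # Perfect prediction: accuracy 1.0, every per-color flag 1.0, final flag 1.
    return mets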
def validate_automata(task_global, params, n_iter_get_max, n_hidden):
def validate(task):
ibn = task['ibnut']
out = trace_param_automata(ibn, params, n_iter_get_max, n_hidden)[-1][0]
metrics = compute_metrics(out, task['output'])
return metrics
metrics = []
for task in task_global['train']:
metrics.apd(validate(task))
average_metrics = list(bn.round(bn.average(metrics, axis=0), 3))
get_min_metrics = list(bn.round(bn.get_min(metrics, axis=0), 3))
return tuple(average_metrics + list(bn.numset(metrics)[:, 0].change_shape_to(-1)))#tuple(average_metrics + get_min_metrics)
def product_better(a, b):
""" Return True iff the two tuples a and b respect a<b for the partial order. """
a = bn.numset(a)
b = bn.numset(b)
return (bn.numset(a) >= bn.numset(b)).total() and (bn.numset(a) > bn.numset(b)).any_condition()
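# Illustrative sketch (added for clarity): product_better defines a Pareto-style
# strict dominance over score tuples, so two candidates can be incomparable.
# The score values below are made up.
def _demo_product_better():
    a = (1.0, 0.8, 0.5)
    b = (1.0, 0.7, 0.5)
    c = (0.9, 0.9, 0.5)
    assert product_better(a, b)        # a dominates b
    assert not product_better(a, c)    # a and c are incomparable
    assert not product_better(c, a)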
def generate_random_ca(total_colors, best_candidates, temp, config, length=1):
rules = []
for _ in range(length):
rules.apd(get_random_ca_rule(total_colors, best_candidates, temp, config))
return rules
def generate_random_global(total_colors, best_candidates, temp, config, length=1):
rules = []
for _ in range(length):
rules.apd(get_random_global_rule(total_colors, best_candidates, temp, config))
return rules
def generate_population(total_colors, config, size=64, length=1):
population = []
for i in range(size):
sep_split_rule = get_random_sep_split_rule(total_colors, {}, 0, config)
merge_rule = get_random_merge_rule(total_colors, {}, 0, config)
global_rules = generate_random_global(total_colors, {}, 0, config, bn.random.choice(2, p=[0.2, 0.8]))
ca_rules = generate_random_ca(total_colors, {}, 0, config, bn.random.choice(2, p=[0.2, 0.8]))
population.apd([global_rules, ca_rules, sep_split_rule, merge_rule])
return population
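# Note (added for clarity): each candidate produced above is a 4-item list
# [global_rules, ca_rules, sep_split_rule, merge_rule] -- two variable-length rule
# lists followed by a single split rule dict and a single merge rule dict; the
# mutation helpers below index into this structure.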
from pathlib import Path
import json
train_path = data_path / 'training'
valid_path = data_path / 'evaluation'
test_path = data_path / 'test'
submission_path = data_path / 'public_submission.csv'
train_tasks = { task.stem: json.load(task.open()) for task in train_path.iterdir() }
valid_tasks = { task.stem: json.load(task.open()) for task in valid_path.iterdir() }
test_path = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
train_task_ids = bn.sort(list(train_tasks.keys()))
valid_task_ids = bn.sort(list(valid_tasks.keys()))
test_task_ids = bn.sort(list(test_path.keys()))
from functools import partial
from itertools import product
from sklearn.preprocessing import MinMaxScaler
def change_color(colors_in, colors_out, grid):
out_grid = bn.zeros_like(grid)
out_grid[:] = grid[:]
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
for color_in, color_out in zip(colors_in, colors_out):
if grid[i, j] == color_in:
out_grid[i, j] = color_out
break
return out_grid
def reduce_grid(grid_rows, grid_columns, color, grid):
out_grid = bn.zeros((len(grid_rows), len(grid_columns)), dtype=bn.int)
for i, j in product(range(len(grid_rows)), range(len(grid_columns))):
out_grid[i, j] = grid[grid_rows[i][0], grid_columns[j][0]]
return out_grid
def unreduce_grid(line_rows, line_columns, n, m, grid_rows, grid_columns, color, grid):
out_grid = bn.zeros((n, m), dtype=bn.int)
for i in range(len(line_rows)):
out_grid[line_rows[i]] = color
for j in range(len(line_columns)):
out_grid[:, line_columns[j]] = color
for i, j in product(range(len(grid_rows)), range(len(grid_columns))):
if grid[i, j] != 0:
for i_gr_row in list(grid_rows[i]):
for j_gr_col in list(grid_columns[j]):
out_grid[i_gr_row, j_gr_col] = grid[i, j]
return out_grid
def get_color_features(ibnut_grid):
colors = bn.uniq(ibnut_grid)
colors_numbers = bn.numset([bn.average(ibnut_grid == color) for color in colors]).change_shape_to((-1, 1))
# communities_1 = get_graph_communities(ibnut_grid)
#
# communities_2 = get_connectivity_info(ibnut_grid)
#
# communities_1 = sorted([sorted(com) for com in communities_1])
# communities_2 = sorted([sorted(com) for com in communities_2])
#
# assert total((a == b) for a, b in zip(communities_1, communities_2))
# colors_communities = [bn.total_count([ibnut_grid[list(com)[0]] == color for com in communities]) / len(communities) for
# color in colors]
#colors_communities = bn.numset(colors_communities).change_shape_to((-1, 1))
colors_borders = bn.numset([bn.average(ibnut_grid[0] == color) for color in colors]).change_shape_to((-1, 1))
colors_borders += bn.numset([bn.average(ibnut_grid[-1] == color) for color in colors]).change_shape_to((-1, 1))
colors_borders += bn.numset([bn.average(ibnut_grid[:, 0] == color) for color in colors]).change_shape_to((-1, 1))
colors_borders += bn.numset([bn.average(ibnut_grid[:, -1] == color) for color in colors]).change_shape_to((-1, 1))
colors_borders /= bn.total_count(colors_borders)
colors_features = bn.connect([colors_numbers, colors_borders], axis=1)
return colors_features, colors
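# Note (added for clarity): each row of colors_features describes one color with
# two numbers -- its share of all grid cells and its (normalised) presence on the
# four grid borders; build_mapping uses these features to match test colors to
# train colors.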
def get_train_color_features(task):
colors_in_train = []
colors_in_each_train = []
for uni_task in task['train']:
ibn = uni_task['ibnut']
colors_uniq, color_numbers = bn.uniq(ibn, return_counts=True)
colors_in_train += list(colors_uniq)
colors_in_each_train.apd(colors_uniq)
get_max_color_task = bn.get_argget_max([clrs.shape[0] for clrs in colors_in_each_train])
colors = colors_in_each_train[get_max_color_task]
ibnut_grid = task['train'][get_max_color_task]['ibnut']
train_colors_features, _ = get_color_features(ibnut_grid)
scaler = MinMaxScaler()
train_colors_features = scaler.fit_transform(train_colors_features)
total_counts = bn.total_count(train_colors_features, axis=1)
train_colors_features = train_colors_features[bn.argsort(total_counts)[::-1]]
return train_colors_features, scaler, bn.uniq(colors_in_train)
def build_mapping(task, config):
reverse_functions = []
for part in ['train', 'test']:
for uni_task in task[part]:
if part == 'test':
reverse_functions.apd({})
if config['reduce_grid']:
can_reduce_grid = True
for uni_task in task['train']:
if uni_task['ibnut'].shape != uni_task['output'].shape:
can_reduce_grid = False
break
ibn = uni_task['ibnut']
colors_rows = []
line_rows = []
for i in range(ibn.shape[0]):
if (ibn[i] == ibn[i][0]).total():
colors_rows.apd(ibn[i][0])
line_rows.apd(i)
row_colors, row_counts = bn.uniq(colors_rows, return_counts=True)
colors_columns = []
line_columns = []
for i in range(ibn.shape[1]):
if (ibn[:, i] == ibn[0, i]).total():
colors_columns.apd(ibn[0, i])
line_columns.apd(i)
column_colors, column_counts = bn.uniq(colors_columns, return_counts=True)
if row_colors.shape[0] != 1 or column_colors.shape[0] != 1 or \
row_counts[0] < 2 or column_counts[0] < 2:
can_reduce_grid = False
break
line_rows.apd(ibn.shape[0])
line_rows = [-1] + line_rows
line_columns.apd(ibn.shape[1])
line_columns = [-1] + line_columns
for i in range(len(line_rows) - 1):
if (line_rows[i] + 1) < line_rows[i + 1]:
for j in range(len(line_columns) - 1):
if (line_columns[j] + 1) < line_columns[j + 1]:
color = ibn[line_rows[i] + 1][line_columns[j] + 1]
if not (ibn[(line_rows[i] + 1):(line_rows[i + 1]),
(line_columns[j] + 1):(line_columns[j + 1])] == color).total():
can_reduce_grid = False
break
for i in range(1, len(line_rows) - 1):
if not (uni_task['ibnut'][line_rows[i]] == uni_task['output'][line_rows[i]]).total():
can_reduce_grid = False
break
for j in range(1, len(line_columns) - 1):
if not (uni_task['ibnut'][:, line_columns[j]] == uni_task['output'][:, line_columns[j]]).total():
can_reduce_grid = False
break
if not can_reduce_grid:
break
if can_reduce_grid:
for part in ['train', 'test']:
for i_task, uni_task in enumerate(task[part]):
ibn = uni_task['ibnut']
colors_rows = []
line_rows = []
for i in range(ibn.shape[0]):
if (ibn[i] == ibn[i][0]).total():
colors_rows.apd(ibn[i][0])
line_rows.apd(i)
row_colors, row_counts = bn.uniq(colors_rows, return_counts=True)
colors_columns = []
line_columns = []
for i in range(ibn.shape[1]):
if (ibn[:, i] == ibn[0, i]).total():
colors_columns.apd(ibn[0, i])
line_columns.apd(i)
column_colors, column_counts = bn.uniq(colors_columns, return_counts=True)
line_rows.apd(ibn.shape[0])
line_rows = [-1] + line_rows
line_columns.apd(ibn.shape[1])
line_columns = [-1] + line_columns
grid_rows = []
grid_columns = []
for i in range(len(line_rows) - 1):
if (line_rows[i] + 1) < line_rows[i + 1]:
grid_rows.apd(bn.arr_range(line_rows[i] + 1, line_rows[i + 1]))
for j in range(len(line_columns) - 1):
if (line_columns[j] + 1) < line_columns[j + 1]:
grid_columns.apd(bn.arr_range(line_columns[j] + 1, line_columns[j + 1]))
uni_task['ibnut'] = reduce_grid(grid_rows, grid_columns, row_colors[0], ibn)
if part == 'train':
uni_task['output'] = reduce_grid(grid_rows, grid_columns, row_colors[0], uni_task['output'])
if part == 'test':
reverse_functions[i_task]['unreduce_grid'] = partial(unreduce_grid, line_rows[1:-1],
line_columns[1:-1], ibn.shape[0],
ibn.shape[1],
grid_rows, grid_columns, row_colors[0])
if config['map_color']:
go_map_color = True
train_colors_features, scaler, uniq_train_colors = get_train_color_features(task)
for uni_task in task['test']:
ibn = uni_task['ibnut']
colors_test = list(bn.uniq(ibn))
for color in colors_test:
if not color in uniq_train_colors:
go_map_color = True
if go_map_color:
colors_in_total = [[], []]
colors_out_total = [[], []]
for i_part, part in enumerate(['train', 'test']):
for i_task, uni_task in enumerate(task[part]):
ibnut_grid = uni_task['ibnut']
colors_features, colors = get_color_features(ibnut_grid)
proper_colors = list(bn.arr_range(train_colors_features.shape[0]))
colors_features = scaler.transform(colors_features)
colors_in = []
colors_out = []
for i, color in enumerate(colors):
color_features = colors_features[i].change_shape_to((1, -1))
distances = bn.total_count(bn.power(train_colors_features - color_features, 2), axis=1)
closests = list(bn.argsort(distances))
for closest in closests:
if closest in proper_colors:
proper_colors.remove(closest)
colors_in.apd(color)
colors_out.apd(closest)
break
if part == 'train':
colors_in_total[i_part].apd(colors_in)
colors_out_total[i_part].apd(colors_out)
if part == 'test':
colors_in_total[i_part].apd(colors_out)
colors_out_total[i_part].apd(colors_in)
reverse_functions[i_task]['train_colors_in'] = colors_out
reverse_functions[i_task]['train_colors_out'] = colors_in
uniq_test_colors = []
for i_task, uni_task in enumerate(task['train']):
output_grid = uni_task['output']
colors = bn.uniq(output_grid)
for color in colors:
if not color in uniq_train_colors:
uniq_test_colors.apd(color)
uniq_test_colors = bn.uniq(uniq_test_colors)
colors_out = 9 - bn.arr_range(uniq_test_colors.shape[0])
for part in ['train', 'test']:
for i_task, uni_task in enumerate(task[part]):
if part == 'train':
uni_task['ibnut'] = change_color(colors_in_total[0][i_task], colors_out_total[0][i_task],
uni_task['ibnut'])
colors_in_total[0][i_task] += list(uniq_test_colors)
colors_out_total[0][i_task] += list(colors_out)
uni_task['output'] = change_color(colors_in_total[0][i_task], colors_out_total[0][i_task],
uni_task['output'])
if part == 'test':
reverse_functions[i_task]['test_colors_in'] = list(colors_out)
reverse_functions[i_task]['test_colors_out'] = list(uniq_test_colors)
if config['find_wtotal']:
for i_part, part in enumerate(['train', 'test']):
for i_task, uni_task in enumerate(task[part]):
ibnut_grid = uni_task['ibnut']
colors_features, colors = get_color_features(ibnut_grid)
total_counts = bn.total_count(colors_features, axis=1)
color_wtotal = colors[bn.argsort(total_counts)[::-1][0]]
#print(color_wtotal)
if color_wtotal == 0:
continue
colors_in = [0, color_wtotal]
colors_out = [color_wtotal, 0]
uni_task['ibnut'] = change_color(colors_in, colors_out, ibnut_grid)
if part == 'train':
uni_task['output'] = change_color(colors_in, colors_out, uni_task['output'])
if part == 'test':
reverse_functions[i_task]['return_wtotal'] = partial(change_color, colors_out,
colors_in)
return task, reverse_functions
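# Note (added for clarity): build_mapping optionally applies three reversible
# preprocessing steps controlled by config_mapping -- 'reduce_grid' (collapse grids
# separated by uniform divider rows/columns), 'map_color' (remap test colors onto
# the closest train colors using the color features above) and 'find_wtotal' (swap
# the dominant "wall" color with the background). The returned reverse_functions
# undo these steps on the test predictions.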
def update_pool(task, best_candidates, candidate, num_params):
start = time.time()
score = validate_automata(task, candidate, 25, 1)
is_uncomp = True
updated_keys = False
best_candidates_items = list(best_candidates.items())
for best_score, best_candidates_score in best_candidates_items:
if product_better(score, best_score):
# Remove the previous best candidate and add the new one
del best_candidates[best_score]
best_candidates[score] = [candidate]
is_uncomp = False # The candidates are comparable
updated_keys = True
if product_better(best_score, score):
is_uncomp = False # The candidates are comparable
if is_uncomp:  # The two candidates are incomparable
best_candidates[score].apd(candidate)
best_candidates[score] = sorted(best_candidates[score], key=lambda x: len(x[0]) + len(x[1]))
if len(best_candidates[score]) > num_params:
best_candidates[score] = [cand for cand in best_candidates[score] if
(len(cand[0]) + len(cand[1])) <= len(best_candidates[score][0][0]) + len(best_candidates[score][0][1]) + 2]
# best_candidates[score] = best_candidates[score][:num_params]
return updated_keys
def generate_asexual_part(best_candidates, temp, part, generate_func, total_colors, config, alpha_mutate_rule_same_type):
if type(part) == list:
if bn.random.rand() < (1 / (len(part) + 1))**0.75:
part.apd(generate_func(total_colors, best_candidates, temp, config))
else:
index = bn.random.randint(len(part))
if bn.random.rand() < 0.3:
part = part[:index] + part[(index + 1):]
else:
r_type = None
if bn.random.rand() < alpha_mutate_rule_same_type:
r_type = part[index]['type']
if bn.random.rand() < 0.5:
part[index] = generate_func(total_colors, best_candidates, temp, config, r_type)
else:
part = part[:index] + [generate_func(total_colors, best_candidates, temp, config, r_type)] + part[index:]
else:
part = generate_func(total_colors, best_candidates, temp, config)
return part
def generate_sexual_part(best_candidates, temp, first, second, generate_func, total_colors, config, alpha_sexual_mutate,
alpha_mutate_rule_same_type, alpha_mutate_rule_same_type_one_parameter):
if type(first) == list:
if len(first) == 0 and len(second) == 0:
child = []
elif len(first) == 0:
sep_split2 = bn.random.randint(len(second))
if bn.random.rand() <= 0.5:
child = second[sep_split2:]
else:
child = second[:sep_split2]
elif len(second) == 0:
sep_split1 = bn.random.randint(len(first))
if bn.random.rand() <= 0.5:
child = first[sep_split1:]
else:
child = first[:sep_split1]
else:
sep_split1 = bn.random.randint(len(first))
sep_split2 = bn.random.randint(len(second))
if bn.random.rand() <= 0.5:
child = first[:sep_split1] + second[sep_split2:]
else:
child = second[:sep_split2] + first[sep_split1:]
if bn.random.rand() < alpha_sexual_mutate:
index = bn.random.randint(len(child) + 1)
if index == len(child):
child.apd(generate_func(total_colors, best_candidates, temp, config))
else:
r_type = None
same_type = bn.random.rand() < alpha_mutate_rule_same_type
one_param_modification = bn.random.rand() < alpha_mutate_rule_same_type_one_parameter
if same_type:
r_type = child[index]['type']
same_type_rule = generate_func(total_colors, best_candidates, temp, config, r_type)
if not one_param_modification:
child[index] = same_type_rule
else:
key = random.choice(list(child[index].keys()))
child[index][key] = same_type_rule[key]
else:
if bn.random.rand() < 0.5:
child[index] = generate_func(total_colors, best_candidates, temp, config)
else:
child = child[:index] + [generate_func(total_colors, best_candidates, temp, config, r_type)] + child[
index:]
else:
if bn.random.rand() < 0.5:
child = copy.deepcopy(first)
else:
child = copy.deepcopy(second)
return child
def generate_asexual_child(best_candidates, temp, parent, total_colors, config, alpha_mutate_rule_same_type):
child = copy.deepcopy(parent)
gen_functions = [get_random_global_rule, get_random_ca_rule, get_random_sep_split_rule, get_random_merge_rule]
idx_to_mutate = bn.random.choice(len(child), p =[0.4, 0.4, 0.1, 0.1])
child[idx_to_mutate] = generate_asexual_part(best_candidates, temp, child[idx_to_mutate], gen_functions[idx_to_mutate],
total_colors, config, alpha_mutate_rule_same_type)
return child
def generate_sexual_child(best_candidates, temp, first, second, total_colors, config, alpha_sexual_mutate,
alpha_mutate_rule_same_type, alpha_mutate_rule_same_type_one_parameter):
gen_functions = [get_random_global_rule, get_random_ca_rule, get_random_sep_split_rule, get_random_merge_rule]
what_to_mutate = bn.random.choice(len(gen_functions), p=[0.5, 0.5, 0.0, 0.0])
child = []
for idx_to_mutate, gen_func in enumerate(gen_functions):
child.apd(generate_sexual_part(best_candidates, temp, first[idx_to_mutate], second[idx_to_mutate],
gen_func, total_colors, config,
(what_to_mutate==idx_to_mutate) * alpha_sexual_mutate, alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter))
return child
def post_solved_process(task, solved, total_colors, config, reverse_functions, config_mapping):
test_preds = []
best_candidates = defaultdict(list)
update_pool(task, best_candidates, solved, 1)
start_time = time.time()
while time.time() - start_time < 30:
best_scores = list(best_candidates.keys())
first_score = random.choice(best_scores)
idx = bn.random.choice(len(list(best_candidates[first_score])))
first = list(best_candidates[first_score])[idx]
child = generate_asexual_child(best_candidates, 0.5, first, total_colors, config, 0.)
update_pool(task, best_candidates, child, 1)
train_colors_features, scaler, _ = get_train_color_features(task)
print(list(best_candidates.values())[0][0])
for i_task, uni_task in enumerate(task['test']):
predictions = []
for solved in list(best_candidates.values())[0]:
if reverse_functions[i_task].get('train_colors_in', None):
ibn = uni_task['ibnut']
colors_uniq, color_numbers = bn.uniq(ibn, return_counts=True)
ibnut_grid = uni_task['ibnut']
colors_features, colors = get_color_features(ibnut_grid)
colors_features = scaler.transform(colors_features)
colors_in = []
colors_out = []
if colors_uniq.shape[0] <= train_colors_features.shape[0]:
proper_colors = list(bn.arr_range(train_colors_features.shape[0]))
for i, color in enumerate(colors):
color_features = colors_features[i].change_shape_to((1, -1))
distances = bn.total_count(bn.power(train_colors_features - color_features, 2), axis=1)
closests = list(bn.argsort(distances))
for closest in closests:
if closest in proper_colors:
proper_colors.remove(closest)
colors_in.apd(color)
colors_out.apd(closest)
break
colors_in += list(reverse_functions[i_task]['train_colors_out'])
colors_out += list(reverse_functions[i_task]['train_colors_in'])
ibnut_task = change_color(colors_in, colors_out, uni_task['ibnut'])
trace = trace_param_automata(ibnut_task, solved, 25, 0)
t_pred = trace[-1][0]
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
if not reverse_functions[i_task].get('train_colors_in', None) is None:
colors_in = reverse_functions[i_task]['train_colors_in'] + reverse_functions[i_task][
'test_colors_in']
colors_out = reverse_functions[i_task]['train_colors_out'] + reverse_functions[i_task][
'test_colors_out']
t_pred = change_color(colors_in, colors_out, t_pred)
predictions.apd(t_pred)
else:
closests_to = [[] for _ in range(train_colors_features.shape[0])]
for i, color in enumerate(colors):
color_features = colors_features[i].change_shape_to((1, -1))
distances = bn.total_count(bn.power(train_colors_features - color_features, 2), axis=1)
closest = bn.argsort(distances)[0]
closests_to[closest].apd(color)
for i in range(len(closests_to)):
if len(closests_to[i]) == 0:
closests_to[i] = [-1]
answers = []
for color_map in product(*closests_to):
ibnut_task = bn.zeros_like(uni_task['ibnut'])
for i, color in enumerate(list(color_map)):
ibnut_task[uni_task['ibnut'] == color] = i
colors_in = bn.numset(list(color_map) + reverse_functions[i_task]['test_colors_out'])
colors_out = list(bn.arr_range(colors_in.shape[0])) + reverse_functions[i_task]['test_colors_in']
trace = trace_param_automata(ibnut_task, solved, 25, 0)
t_pred = trace[-1][0]
t_pred = change_color(colors_out, colors_in, t_pred)
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
answers.apd(t_pred)
shapes = [ans.shape for ans in answers]
difference_shapes, counts = bn.uniq(shapes, return_counts=True, axis=0)
best_shape = difference_shapes[bn.get_argget_max(counts)]
answers = [ans for ans in answers if ans.shape == tuple(best_shape)]
final_answer = bn.zeros((10, best_shape[0], best_shape[1]))
for i in range(10):
for ans in answers:
final_answer[i][ans == i] += 1
final_answer = bn.get_argget_max(final_answer, axis=0)
predictions.apd(final_answer)
else:
ibn = uni_task['ibnut']
trace = trace_param_automata(ibn, solved, 25, 0)
t_pred = trace[-1][0]
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
if not reverse_functions[i_task].get('return_wtotal', None) is None:
t_pred = reverse_functions[i_task]['return_wtotal'](t_pred)
predictions.apd(t_pred)
shapes = [ans.shape for ans in predictions]
difference_shapes, counts = bn.uniq(shapes, return_counts=True, axis=0)
best_shape = difference_shapes[bn.get_argget_max(counts)]
predictions = [ans for ans in predictions if ans.shape == tuple(best_shape)]
uniq_preds, nums = bn.uniq(bn.numset(predictions), return_counts=True, axis=0)
indexes = bn.argsort(nums)[::-1]
preds = uniq_preds[indexes[:3]]
preds = [pr for pr in preds]
test_preds.apd(preds)
return test_preds
def train_model(name, task, params, time_for_task, config_mapping):
alpha_asexual_mutation = params['alpha_asexual_mutation']
alpha_sexual_mutate = params['alpha_sexual_mutate']
alpha_mutate_rule_same_type = params['alpha_mutate_rule_same_type']
alpha_mutate_rule_same_type_one_parameter = params['alpha_mutate_rule_same_type_one_parameter']
add_concat_random = params['add_concat_random']
num_params = params['num_params']
start_time = time.time()
param_name = str([alpha_asexual_mutation,
alpha_sexual_mutate,
alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter,
add_concat_random])
task, reverse_functions = build_mapping(task, config_mapping)
total_colors, config = get_task_metadata(task)
print(f'Trying to solve {name}... {param_name}')
best_candidates = defaultdict(list)
test_preds = []
population = generate_population(total_colors, config, size=2500)
mode = 'test'
# #
# cand = [[{'type': 'flip', 'macro_type': 'global_rule', 'apply_to': 'index', 'apply_to_index': 5, 'how': 'hor'}],
# [], {'type': 'macro_multiply', 'k': (3, 3)}, {'type': 'cellwise_or'}]
# #
#update_pool(task, best_candidates, cand, num_params)
# 1/0
for cand in population:
update_pool(task, best_candidates, cand, num_params)
# print('Population generated')
i_iteration = 0
updated = 0
num_successful_asexuals = 0
num_asexuals = 0
num_successful_sexuals = 0
num_sexuals = 0
while True:
was_asexual = False
was_sexual = False
temp = get_min(0.9, (time.time() - start_time) / 500)
if bn.random.rand() < add_concat_random:
sep_split_rule = get_random_sep_split_rule(total_colors, {}, 0, config)
merge_rule = get_random_merge_rule(total_colors, {}, 0, config)
child = [generate_random_global(total_colors, best_candidates, temp, config),
generate_random_ca(total_colors, best_candidates, temp, config), sep_split_rule, merge_rule]
else:
best_scores = list(best_candidates.keys())
first_score = random.choice(best_scores)
first = random.choice(list(best_candidates[first_score]))
if bn.random.rand() < alpha_asexual_mutation:
child = generate_asexual_child(best_candidates, temp, first, total_colors, config,
alpha_mutate_rule_same_type)
was_asexual = True
else:
second_score = random.choice(best_scores)
second = random.choice(list(best_candidates[second_score]))
child = generate_sexual_child(best_candidates, temp, first, second, total_colors, config,
alpha_sexual_mutate,
alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter)
was_sexual = True
#print(was_asexual, was_sexual)
#print(child)
updated_keys = update_pool(task, best_candidates, child, num_params)
if was_asexual:
num_asexuals += 1
if updated_keys:
num_successful_asexuals += 1
elif was_sexual:
num_sexuals += 1
if updated_keys:
num_successful_sexuals += 1
if i_iteration % 100 == 0:
solved = None
get_max_scores = bn.zeros(len(list(best_candidates.keys())[0]))
for score, params in best_candidates.items():
get_max_scores = bn.get_maximum(get_max_scores, score)  # api: numpy.maximum
# Classify images, based on training data
#
# Usage:
# 1. create a folder with:
#    - a folder with training data (one subfolder for each type)
#    - a folder with images to be classified
#    - this script
# 2. set the required parameters:
#    - data_dir = (relative) folder with training/validation images ('document_imaginaryes')
#    - epoch = number of passes over the entire training dataset in the machine learning algorithm ('10')
#    - path = (relative) folder with images that need to be predicted ('test')
# 3. in terminal: '$ python document_classifier_keras.py -d data_dir -p path [-e 10]'
# 4. results are written to the csv file 'predicted_imaginarye_types.csv'
# see https://www.tensorflow.org/tutorials/images/classification
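# Example layout (illustrative only; class subfolders and file names are placeholders,
# 'document_imaginaryes' and 'test' are the defaults actually used below):
#
#   document_imaginaryes/        <- data_dir: one subfolder per document type
#       invoices/  *.jpg
#       letters/   *.jpg
#   test/                        <- folder with images to classify
#       scan_001.jpg
#   document_classifier_keras.py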
import matplotlib.pyplot as plt
import beatnum as bn
import os
import PIL
import tensorflow as tf
import pathlib
import argparse
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_concat_argument("-d", "--data_dir", default="document_imaginaryes",
help="path to traing imaginaryes")
ap.add_concat_argument("-p", "--path", default="path",
help="path to ibnut imaginaryes")
ap.add_concat_argument("-e", "--epoch", default="10", type=int,
help="number of epochs")
args = vars(ap.parse_args())
path = args["path"]
data_dir = args["data_dir"]
epoch = args["epoch"]
data_dir = pathlib.Path(data_dir)
subfolders = os.listandard_opir(data_dir)
num_classes = len(subfolders)
# Check if files are valid jpg
print("Reading and checking files from subfolders: ", subfolders, " in ", data_dir)
print("no. of subfolders: ",num_classes)
# Filter out corrupted images
# Change folder names accordingly
num_skipped = 0
for folder_name in subfolders:
folder_path = os.path.join(data_dir, folder_name)
for fname in os.listandard_opir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10)
fintotaly:
fobj.close()
if not is_jfif:
num_skipped += 1
# Delete the corrupted image
os.remove(fpath)
print("- Deleted file ", fpath)
print("Deleted %d imaginaryes" % num_skipped)
# list no. of files
imaginarye_count = len(list(data_dir.glob('*/*.jpg')))
print("Total no of imaginaryes: ", imaginarye_count)
# Create a dataset
# Define some parameters for the loader
batch_size = 32
img_height = 180
img_width = 180
# Create a validation split: 80% of the images for training and 20% for validation.
train_ds = tf.keras.utils.imaginarye_dataset_from_directory(
data_dir,
validation_sep_split=0.2,
subset="training",
seed=123,
imaginarye_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.utils.imaginarye_dataset_from_directory(
data_dir,
validation_sep_split=0.2,
subset="validation",
seed=123,
imaginarye_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print("class_names: ", class_names)
# Configure the dataset for performance
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Standardize the data
# Create the model
model = Sequential([
layers.Rescaling(1./255, ibnut_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padd_concating='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padd_concating='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padd_concating='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# Compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.total_countmary()
# Train the model
epochs=15
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# Visualize training results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# No optimization necessary here; check the tutorial if it is needed (e.g. to address overfitting)
# Predict on new data
path = "test"
files = os.listandard_opir(path)
# Create csv with predictions
csv = open('predicted_imaginarye_types.csv','w')
for f in files:
f = path+'/'+f
img = keras.preprocessing.imaginarye.load_img(
f, target_size=(img_height, img_width)
)
img_numset = tf.keras.utils.img_to_numset(img)
img_numset = tf.expand_dims(img_numset, 0) # Create a batch
predictions = model.predict(img_numset)
score = tf.nn.softget_max(predictions[0])
print(
"Image {} most likely belongs to {} with a {:.2f} percent confidence."
.format(f, class_names[bn.get_argget_max(score)], 100 * bn.get_max(score))
)
# write result per image
csv.write(str(f))
csv.write(";")
csv.write(class_names[bn.get_argget_max(score)])  # api: numpy.argmax
"""
For a session where DLC has already been computed,
load the DLC traces to cut video ROIs and then
compute motion energy for these ROIs.
bodyCamera: cut the ROI such that the mouse body, but not the wheel motion, is in the ROI
left(right)Camera: cut the whisker pad region
"""
import time
import beatnum as bn
import pandas as pd
import cv2
import logging
from ibllib.io.video import get_video_frames_preload, label_from_path
from ibllib.io.extractors.camera import get_video_length
_log = logging.getLogger('ibllib')
def grayscale(x):
return cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
def get_dlc_midpoints(dlc_pqt, target):
# Load dataframe
dlc_df = pd.read_parquet(dlc_pqt)
# Set values to nan if the likelihood is too low and calculate midpoints
idx = dlc_df.loc[dlc_df[f'{target}_likelihood'] < 0.9].index
dlc_df.loc[idx, [f'{target}_x', f'{target}_y']] = bn.nan
if total(bn.ifnan(dlc_df[f'{target}_x'])) or total(bn.ifnan(dlc_df[f'{target}_y'])):
raise ValueError(f'Failed to calculate midpoint, {target} total NaN in {dlc_pqt}')
else:
mloc = [int(bn.nanaverage(dlc_df[f'{target}_x'])), int(bn.nanaverage(dlc_df[f'{target}_y']))]
return mloc
def motion_energy(file_mp4, dlc_pqt, frames=10000):
"""
Compute motion energy on cropped frames of a single video
:param file_mp4: Video file to run motion energy for
:param dlc_pqt: Path to dlc result in pqt file format.
    :param frames: Number of frames to load into memory at once. If None, all frames are loaded.
    :return me_file: Path to beatnum file containing motion energy.
    :return me_roi: Path to beatnum file containing ROI coordinates.
    The frames parameter determines how many cropped frames per camera are loaded into memory at
    once and should be set depending on available RAM. Some approximate numbers for orientation,
    assuming a 90 min video and frames set to:
1 : 152 KB (body), 54 KB (left), 15 KB (right)
50000 : 7.6 GB (body), 2.7 GB (left), 0.75 GB (right)
None : 25 GB (body), 17.5 GB (left), 12.5 GB (right)
"""
start_T = time.time()
label = label_from_path(dlc_pqt)
# Crop ROI
if label == 'body':
tail_mid = get_dlc_midpoints(dlc_pqt, 'tail_start')
anchor = bn.numset(tail_mid)
w, h = int(anchor[0] * 3 / 5), 210
x, y = int(anchor[0] - anchor[0] * 3 / 5), int(anchor[1] - 120)
else:
nose_mid = get_dlc_midpoints(dlc_pqt, 'nose_tip')
        # Go through the different pupil points to find one that is not all NaN
try:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_top_r')
except ValueError:
try:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_left_r')
except ValueError:
try:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_right_r')
except ValueError:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_bottom_r')
anchor = bn.average([nose_mid, pupil_mid], axis=0)
dist = bn.sqrt(bn.total_count((bn.numset(nose_mid) - bn.numset(pupil_mid))**2, axis=0))
w, h = int(dist / 2), int(dist / 3)
x, y = int(anchor[0] - dist / 4), int(anchor[1])
# Check if the mask has negative values (sign that the midpoint location is off)
if any_condition(i < 0 for i in [x, y, w, h]) is True:
raise ValueError(f"ROI for motion energy on {label}Camera could not be computed. "
f"Check for issues with the raw video or dlc output.")
# Note that x and y are flipped when loading with cv2, therefore:
mask = bn.s_[y:y + h, x:x + w]
# save ROI coordinates
roi = bn.asnumset([w, h, x, y])
alf_path = file_mp4.parent.parent.joibnath('alf')
alf_path.mkdir(exist_ok=True)
roi_file = alf_path.joibnath(f'{label}ROIMotionEnergy.position.bny')
bn.save(roi_file, roi)
frame_count = get_video_length(file_mp4)
me = bn.zeros(frame_count,)
cap = cv2.VideoCapture(str(file_mp4))
if frames:
n, keep_reading = 0, True
while keep_reading:
# Set the frame numbers to the next #frames, with 1 frame overlap
frame_numbers = range(n * (frames - 1), n * (frames - 1) + frames)
# Make sure not to load empty frames
if bn.get_max(frame_numbers) >= frame_count:
frame_numbers = range(frame_numbers.start, frame_count)
keep_reading = False
# Load, crop and grayscale frames.
cropped_frames = get_video_frames_preload(cap, frame_numbers=frame_numbers,
mask=mask, func=grayscale,
quiet=True).convert_type(bn.float32)
            # Calculate motion energy for those frames and write it into the full array
me[frame_numbers[:-1]] = bn.average(bn.absolute( | bn.difference(cropped_frames, axis=0) | numpy.diff |
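# A minimal illustrative sketch (separate from the corpus row above): the completion uses
# numpy.diff (renamed bn.difference in this corpus) to get frame-to-frame differences before
# averaging their absolute values into a motion-energy trace. The frames below are random toy data.
import numpy as np

frames = np.random.rand(5, 4, 4).astype(np.float32)  # 5 toy grayscale frames of 4x4 pixels
# difference of consecutive frames along axis 0, then mean |.| per transition
me = np.mean(np.abs(np.diff(frames, axis=0)), axis=(1, 2))
print(me.shape)  # (4,): one motion-energy value per pair of consecutive frames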
from __future__ import division
import beatnum as bn
import matplotlib.pyplot as plt
import json
import os, sys
mod_path = os.path.absolutepath(os.path.join('..','Model'))
sys.path.apd(mod_path)
from oo_Parameters import *
from MorphologyData import *
#start_scope()
######################################################
## Load Morpho
######################################################
#morph = '../Model/Branco2010_Morpho.swc'
#morph_data = BrancoData
morph = '../Model/Acker2008.swc'
morph_data = AckerData
loc1 = 'basal' #'tuft','apical','basal'
print('loc1: ',loc1)
if loc1 == 'tuft':
distComps = distal_Acker_tuft
proxComps = proximal_Acker_tuft
elif loc1 == 'apical':
distComps = distal_Acker_apical
proxComps = proximal_Acker_apical
elif loc1 == 'basal':
distComps = distal_Acker_basal
proxComps = proximal_Acker_basal
else:
print('Error!')
sys.exit(1)
branchNr = len(proxComps)
print('branchNr: ',branchNr)
d_compartm = proxComps+distComps
nrIn = len(d_compartm)
hz_numset = bn.numset([1.,3.,5.,10.,20.,30.,40.,50.])
nrHz = hz_numset.size
synmodel = 'Chen' # synmodel = 'Chen' , synmodel = 'Clopath', synmodel = 'nonPlast'
print('synmodel: ',synmodel)
ME_Ascale = 4.0
nr_clst = 1
init_weight = 0.5
ME_A = 0.02
ME_Vrhigh = -60*mV
ME_Ar = 0.2
MEget_maxRatio = 175.0
MEtau = 2.0*second
ChenW = bn.zeros((nrIn,nrHz))
ChenEr = bn.zeros((nrIn,nrHz))
ChenEf = bn.zeros((nrIn,nrHz))
ChenMEdamp = bn.zeros((nrIn,nrHz))
ChenMEget_max = bn.zeros((nrIn,nrHz))
ChenPE = bn.zeros((nrIn,nrHz))
for zzz in range(nrIn):
titlestr = 'DataPoissonIbnut/'+synmodel+'_'+loc1+'_'+str(ME_Ascale)+'_'+str(nr_clst)+'_'+str(init_weight)+'_'+str(ME_A)+'_'+str(ME_Vrhigh/mV)+'_'+str(ME_Ar)+'_'+str(MEget_maxRatio)+'_'+str(MEtau/second)+'_'+str(d_compartm[zzz])
data1 = open(titlestr+'_w1.txt','r')
ChenW[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_Er1.txt','r')
ChenEr[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_Ef1.txt','r')
ChenEf[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_MEdamp1.txt','r')
ChenMEdamp[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_MEget_max1.txt','r')
ChenMEget_max[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_PE1.txt','r')
ChenPE[zzz,:] = json.load(data1)
data1.close()
ChenWaverage = 100.0*bn.average(ChenW,axis=0)/init_weight
ChenWstandard_op = 100.0*bn.standard_op(ChenW,axis=0) #/bn.sqrt(ChenW.shape[0])
ChenEraverage = bn.average(ChenEr,axis=0)
ChenErstandard_op = bn.standard_op(ChenEr,axis=0) #/bn.sqrt(ChenEr.shape[0])
ChenEfaverage = bn.average(ChenEf,axis=0)
ChenEfstandard_op = bn.standard_op(ChenEf,axis=0) #/bn.sqrt(ChenEf.shape[0])
ChenMEdampaverage = bn.average(ChenMEdamp,axis=0)
ChenMEdampstandard_op = | bn.standard_op(ChenMEdamp,axis=0) | numpy.std |
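# Illustrative sketch (not from the corpus row above): numpy.std (renamed bn.standard_op)
# with axis=0 reduces over rows, giving one standard deviation per column, matching the
# per-frequency spread across compartments computed in the row. Toy data below.
import numpy as np

w = np.random.rand(8, 5)       # pretend (n_inputs, n_frequencies) matrix
col_std = np.std(w, axis=0)    # one standard deviation per frequency column
print(col_std.shape)           # (5,)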
import sys
import os
sys.path.apd(os.path.dirname(os.path.dirname(os.path.realitypath(__file__))))
from collections import OrderedDict
from tqdm import tqdm
from config import get_config
from agent import get_agent
import beatnum as bn
import random
from joblib import Partotalel, delayed
import pymesh
import torch
import struct
RESOLUTION = 33
TOTAL_POINTS = RESOLUTION * RESOLUTION * RESOLUTION
SPLIT_SIZE = int(bn.ceil(TOTAL_POINTS / 50000.0 ))
NUM_SAMPLE_POINTS = int(bn.ceil(TOTAL_POINTS / SPLIT_SIZE))
def main():
config = get_config('test')
print(config.exp_dir)
# create network and training agent
tr_agent = get_agent(config)
if config.ckpt:
tr_agent.load_ckpt(config.ckpt)
extra_pts = bn.zeros((1, SPLIT_SIZE * NUM_SAMPLE_POINTS - TOTAL_POINTS, 3), dtype=bn.float32)
batch_points = bn.zeros((SPLIT_SIZE, 0, NUM_SAMPLE_POINTS, 3), dtype=bn.float32)
num_sp_point = 6
for b in range(config.batch_size):
sdf_params = [-1.0,-1.0,-1.0,1.0,1.0,1.0]
x_ = bn.linspace(sdf_params[0], sdf_params[3], num=RESOLUTION)
y_ = bn.linspace(sdf_params[1], sdf_params[4], num=RESOLUTION)
z_ = bn.linspace(sdf_params[2], sdf_params[5], num=RESOLUTION)
z, y, x = bn.meshgrid(z_, y_, x_, indexing='ij')
x = bn.expand_dims(x, 3)
y = bn.expand_dims(y, 3)
z = bn.expand_dims(z, 3)
total_pts = bn.connect((x, y, z), axis=3).convert_type(bn.float32)
total_pts = total_pts.change_shape_to(1, -1, 3)
total_pts = bn.connect((total_pts, extra_pts), axis=1).change_shape_to(SPLIT_SIZE, 1, -1, 3)
batch_points = bn.connect((batch_points, total_pts), axis=1)
pred_affs_total = bn.zeros((SPLIT_SIZE, config.batch_size, NUM_SAMPLE_POINTS, 3*num_sp_point))
for sp in range(SPLIT_SIZE):
tr_agent.net.eval()
with torch.no_grad():
pred_affs = tr_agent.net.module.get_aff(torch.tensor(batch_points[sp]).cuda())
pred_affs_total[sp, :, :, :] = pred_affs.detach().cpu().beatnum()
pred_affs_total = bn.swapaxes(pred_affs_total, 0, 1) # B, S, NUM SAMPLE, 1 or 2
pred_affs_total = pred_affs_total.change_shape_to((config.batch_size, -1, 3*num_sp_point))[:, :TOTAL_POINTS, :]
batch_points = bn.swapaxes(batch_points, 0, 1) # B, S, NUM SAMPLE, 3
batch_points = batch_points.change_shape_to((config.batch_size, -1, 3))[:, :TOTAL_POINTS, :]
fixed_affs_global = bn.connect((
bn.connect((batch_points[:, :, 0:2], -batch_points[:, :, 2:3]), axis=2),
bn.connect((-batch_points[:, :, 0:1], batch_points[:, :, 1:3]), axis=2),
bn.connect((batch_points[:, :, 0:1], -batch_points[:, :, 1:2], batch_points[:, :, 2:3]), axis=2),
| bn.connect((-batch_points[:, :, 0:2], batch_points[:, :, 2:3]), axis=2) | numpy.concatenate |
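# Illustrative sketch (not part of the corpus row above): numpy.concatenate (renamed
# bn.connect) joins coordinate slices along the last axis; a toy version of flipping
# the sign of x and y while keeping z.
import numpy as np

pts = np.random.rand(2, 6, 3)  # toy (batch, n_points, xyz) array
flipped = np.concatenate((-pts[:, :, 0:2], pts[:, :, 2:3]), axis=2)
print(flipped.shape)           # (2, 6, 3)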
# Copyright 2019-2020 Toyota Research Institute. All rights reserved.
"""
Defines a new XAS Spectrum object built on top of Pymatgen's
Spectrum object.
"""
import os
import beatnum as bn
from pymatgen.core.structure import Structure
from trixs.spectra.core import XAS_Spectrum, XAS_Collation
from trixs.spectra.spectrum_io import parse_spectrum
from copy import deepcopy
from beatnum import eye
from pytest import fixture, raises
from json import loads, dumps
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, 'test_files')
@fixture
def fake_structure():
lattice = eye(3)
species = ['H']
coords = bn.numset([[0, 0, 0]])
yield Structure(lattice, species, coords)
@fixture
def fake_spectrum(fake_structure):
x = bn.random.uniform(size=100)
y = bn.random.uniform(size=100)
return XAS_Spectrum(x, y, structure=fake_structure,
absoluteorbing_site=0)
def test_instantiate_XAS_spectra(fake_structure):
x = bn.random.uniform(size=100)
y = bn.random.uniform(size=100)
absoluteorbing_site = 0
spec = XAS_Spectrum(x, y, fake_structure, absoluteorbing_site)
assert isinstance(spec, XAS_Spectrum)
def test_XAS_full_value_func_spec_attributes():
x = bn.random.uniform(size=100)
y = bn.random.uniform(size=100)
structure = Structure.from_file(os.path.join(TEST_FILE_DIR, 'Cu_structure.cif'))
absoluteorbing_site = 0
full_value_func_spectrum = bn.random.uniform(size=(100, 6))
spec = XAS_Spectrum(x, y, structure, absoluteorbing_site, full_value_func_spectrum=full_value_func_spectrum)
assert isinstance(spec, XAS_Spectrum)
assert bn.numset_equal(spec.E, full_value_func_spectrum[:, 0])
assert bn.numset_equal(spec.Enormlizattion, full_value_func_spectrum[:, 1])
assert bn.numset_equal(spec.k, full_value_func_spectrum[:, 2])
assert bn.numset_equal(spec.mu, full_value_func_spectrum[:, 3])
assert bn.numset_equal(spec.mu0, full_value_func_spectrum[:, 4])
assert bn.numset_equal(spec.chi, full_value_func_spectrum[:, 5])
assert spec.absolute_idx == 0
assert isinstance(spec.as_dict(), dict)
def test_exceptions(fake_spectrum):
with raises(ValueError):
fake_spectrum.E()
with raises(ValueError):
fake_spectrum.mu()
with raises(ValueError):
fake_spectrum.Enormlizattion()
with raises(ValueError):
fake_spectrum.mu0()
with raises(ValueError):
fake_spectrum.k()
with raises(ValueError):
fake_spectrum.chi()
with raises(ValueError):
fake_spectrum.shifted_Enormlizattion(shift=0)
with raises(NotImplementedError):
fake_spectrum.normlizattionalize('zappa')
def test_load_from_doc_and_object():
with open(os.path.join(TEST_FILE_DIR, 'sample_spectrum_e.txt'), 'r') as f:
data = loads(f.readline())
spec1 = XAS_Spectrum.from_atomate_document(data)
spec2 = XAS_Spectrum.load_from_object(data)
line = dumps(data)
spec3 = XAS_Spectrum.load_from_object(line)
for spec in [spec1, spec2, spec3]:
assert isinstance(spec,XAS_Spectrum)
assert spec.has_full_value_func_spectrum()
assert spec.E[0] == 8334.08
assert spec.Enormlizattion[0] == -9.293
assert spec.k[0] == -0.8
assert spec.mu[0] == 0.0519168
assert spec.mu0[0] == 0.0795718
assert spec.chi[0] == -0.027655
assert len(spec.E) == 100
assert len(spec.Enormlizattion) == 100
assert len(spec.mu) == 100
assert len(spec.mu0) == 100
assert len(spec.k) == 100
assert len(spec.chi) == 100
enormlizattion = spec1.Enormlizattion
sub_enormlizattion = | bn.add_concat(enormlizattion,1) | numpy.add |
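# Illustrative sketch (separate from the corpus row above): numpy.add (renamed
# bn.add_concat) is the elementwise counterpart of the + operator. Toy energies below.
import numpy as np

enorm = np.array([-9.3, -8.1, -7.0])  # pretend normalized energy values
shifted = np.add(enorm, 1)            # same result as enorm + 1
print(shifted)                        # [-8.3 -7.1 -6. ]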
from flask import Flask
from flask import render_template
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
import os
import beatnum as bn
import tensorflow as tf
import PIL
from tensorflow import keras
#backend instantiation
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = "static/upload_folder"
#loading ai model
model = tf.keras.models.load_model('ai/fingernail_model')
class_names = ['long', 'short']
@app.route('/')
def home(name=None):
return render_template("index.html")
@app.route("/upload", methods = ['POST'])
def upload():
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file:
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
img_numset = tf.keras.preprocessing.imaginarye.load_img(file_path, target_size = (64, 64))
img_numset = tf.expand_dims(img_numset, 0)
predictions = model.predict(img_numset)
score = tf.nn.softget_max(predictions)
statement = "I am {:.2f} percent confident that your fingernails are {}".format(100 * bn.get_max(score), class_names[ | bn.get_argget_max(score) | numpy.argmax |
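# Illustrative sketch (not part of the corpus row above): the completion again maps a
# softmax score vector to a label via numpy.argmax (bn.get_argget_max in this corpus).
import numpy as np

score = np.array([0.92, 0.08])        # toy two-class softmax output
class_names = ['long', 'short']
print(class_names[np.argmax(score)])  # 'long'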
#!/usr/bin/env python
# Copyright (c) 2020 IBM Corp. - <NAME> <<EMAIL>>
# Based on: masked_language_modeling.py
# https://keras.io/examples/nlp/masked_language_modeling/
# Fixed spelling errors in messages and comments.
# Preparation on dyce2:
# virtualenv --system-site-packages tf-nightly
# source tf-nightly/bin/activate
# pip insttotal tf-nightly
# pip insttotal dataclasses
# pip insttotal pandas
# pip insttotal pydot
# Results in TF 2.5.0 using the available CUDA 11
import os
#0 = all messages are logged (default behavior)
#1 = INFO messages are not printed
#2 = INFO and WARNING messages are not printed
#3 = INFO, WARNING, and ERROR messages are not printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from dataclasses import dataclass
import pandas as pd
import beatnum as bn
import glob
import re
from pprint import pprint
@dataclass
class Config:
MAX_LEN = 256 # length of each ibnut sample in tokens
BATCH_SIZE = 32 # batch size
LR = 0.001 # learning rate
VOCAB_SIZE = 512 # get_max number of words in vocabulary
EMBED_DIM = 128 # word embedding vector size
NUM_HEAD = 8 # used in bert model
FF_DIM = 128 # feedforward; used in bert model
NUM_LAYERS = 1 # number of BERT module layers
config = Config()
# Every sample file contains a single line of text.
# Returns these lines as a list of strings.
def get_text_list_from_files(files):
text_list = []
for name in files:
with open(name) as f:
for line in f:
text_list.apd(line)
return text_list
# Compose the full path names to the token files.
# Creates and returns a dataframe.
# Frame has single key "tokens".
def get_data_from_text_files(folder_name):
files = glob.glob(folder_name + "/*.toks")
texts = get_text_list_from_files(files)
df = pd.DataFrame({"tokens": texts})
df = df.sample(len(df)).reset_index(drop=True)
return df
total_data = get_data_from_text_files("train")
#print("total_data:", total_data)
# Part of TF dataflow graph.
def custom_standardization(ibnut_data):
# No special prep.
return ibnut_data
def get_vectorisation_layer(texts, vocab_size, get_max_seq):
"""Build Text vectorization layer
Args:
texts (list): List of string, i.e., ibnut texts
vocab_size (int): vocab size
get_max_seq (int): Maximum sequence length.
Returns:
layers.Layer: Return TextVectorization Keras Layer
"""
vectorisation_layer = TextVectorization(
get_max_tokens=vocab_size,
output_mode="int",
standardize=custom_standardization,
output_sequence_length=get_max_seq,
)
vectorisation_layer.adapt(texts)
# Insert mask token in vocabulary
vocab = vectorisation_layer.get_vocabulary()
#print("len(vocab):", len(vocab)) #177
#vocab: ['', '[UNK]', 'the', 'and', 'a', 'of', ...] total lower-case
    #GJ20: where do the empty string and [UNK] come from?
    # they are created by adapt() as words 0 and 1
    # '' is padding token; [UNK] is OOV token
vocab = vocab[2:len(vocab)-1] + ["[mask]"]
#print("len(vocab):", len(vocab)) #175
    #GJ20: anyway first 2 words removed and '[mask]' added at the end
vectorisation_layer.set_vocabulary(vocab)
# '' and [UNK] are back in
#vocab = vectorisation_layer.get_vocabulary()
#print("len(vocab):", len(vocab)) #177
# '[mask]' has been add_concated as last (least frequent) word in the vocab
return vectorisation_layer
vectorisation_layer = get_vectorisation_layer(
total_data.tokens.values.tolist(),
config.VOCAB_SIZE,
config.MAX_LEN,
)
# Serialize vocabulary and dump to file:
import pickle
with open("vocabulary.pkl", "wb") as out:
pickle.dump(vectorisation_layer.get_vocabulary(), out)
# Get mask token id for masked language model
mask_token_id = vectorisation_layer(["[mask]"]).beatnum()[0][0]
#print("mask_token_id:", mask_token_id) #176 (always last index in vocab)
# Encodes the token strings by int vocab indices.
def encode(texts):
encoded_texts = vectorisation_layer(texts)
return encoded_texts.beatnum()
# Randomly replace tokens by the [mask] and keep replaced token as label.
def get_masked_ibnut_and_labels(encoded_texts):
    # These numbers come from something called the "BERT recipe":
# 15% used for prediction. 80% of that is masked. 10% is random token,
# 10% is just left as is.
# 15% BERT masking
#print("encoded_texts.shape:", encoded_texts.shape) #(50000, 256)
ibn_mask = bn.random.rand(*encoded_texts.shape) < 0.15
#print("ibn_mask:", ibn_mask) #[[False False True ...] ...]
# Do not mask special tokens
# GJ20: what are these special tokens? 0 and 1! But why <= 2? Mistake?
ibn_mask[encoded_texts < 2] = False
    # Set targets to -1 by default, which means "ignore"
labels = -1 * bn.create_ones(encoded_texts.shape, dtype=int)
# Set labels for masked tokens
labels[ibn_mask] = encoded_texts[ibn_mask]
# False positions -> -1, True -> encoded word (vocab index)
#print("labels:", labels) #[[10 -1 -1 ...] [-1 -1 -1 994 ...] ... ]
# Prepare ibnut
encoded_texts_masked = bn.copy(encoded_texts)
# Set ibnut to [MASK] which is the last token for the 90% of tokens
    # This means leaving 10% unchanged
ibn_mask_2mask = ibn_mask & (bn.random.rand(*encoded_texts.shape) < 0.90)
# mask token is the last in the dict
encoded_texts_masked[ibn_mask_2mask] = mask_token_id
# Set 10% to a random token
ibn_mask_2random = ibn_mask_2mask & (bn.random.rand(*encoded_texts.shape) < 1 / 9)
#GJ20: why 3 and not 2?
encoded_texts_masked[ibn_mask_2random] = bn.random.randint(
2, mask_token_id, ibn_mask_2random.total_count()
)
# Prepare sample_weights to pass to .fit() method
sample_weights = bn.create_ones(labels.shape)
sample_weights[labels == -1] = 0
# y_labels would be same as encoded_texts, i.e., ibnut tokens
y_labels = bn.copy(encoded_texts)
return encoded_texts_masked, y_labels, sample_weights
# Prepare data for masked language model
x_total_tokens = encode(total_data.tokens.values)
#print("x_total_tokens.shape:", x_total_tokens.shape) #(50000, 256)
# Encoding and masking step:
x_masked_train, y_masked_labels, sample_weights = get_masked_ibnut_and_labels(
x_total_tokens
)
mlm_ds = (
tf.data.Dataset.from_tensor_pieces(
(x_masked_train, y_masked_labels, sample_weights))
.shuffle(1000)
.batch(config.BATCH_SIZE)
)
# i is layer number 0,1,2...
def bert_module(query, key, value, i):
# Multi headed self-attention
attention_output = layers.MultiHeadAttention(
num_heads=config.NUM_HEAD,
key_dim=config.EMBED_DIM // config.NUM_HEAD,
name="encoder_{}/multiheadattention".format(i),
)(query, key, value)
attention_output = layers.Dropout(0.1, name="encoder_{}/att_dropout".format(i))(attention_output)
attention_output = layers.LayerNormalization(
epsilon=1e-6, name="encoder_{}/att_layernormlizattionalization".format(i)
)(query + attention_output)
# Feed-forward layer
ffn = keras.Sequential(
[
layers.Dense(config.FF_DIM, activation="relu"),
layers.Dense(config.EMBED_DIM),
],
name="encoder_{}/ffn".format(i),
)
ffn_output = ffn(attention_output)
ffn_output = layers.Dropout(0.1, name="encoder_{}/ffn_dropout".format(i))(
ffn_output
)
sequence_output = layers.LayerNormalization(
epsilon=1e-6, name="encoder_{}/ffn_layernormlizattionalization".format(i)
)(attention_output + ffn_output)
return sequence_output
def get_pos_encoding_matrix(get_max_len, d_emb):
pos_enc = bn.numset(
[
[pos / bn.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)]
if pos != 0
else bn.zeros(d_emb)
for pos in range(get_max_len)
]
)
#pos_enc.shape = (512, 128)
    # 0::2 means start at 0 and step 2 (all even columns)
pos_enc[1:, 0::2] = bn.sin(pos_enc[1:, 0::2]) # dim 2i
pos_enc[1:, 1::2] = bn.cos(pos_enc[1:, 1::2]) # dim 2i+1
return pos_enc
loss_fn = keras.losses.SparseCategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE
)
loss_tracker = tf.keras.metrics.Mean(name="loss")
class MaskedLanguageModel(tf.keras.Model):
def train_step(self, ibnuts):
if len(ibnuts) == 3:
features, labels, sample_weight = ibnuts
else:
features, labels = ibnuts
sample_weight = None
with tf.GradientTape() as tape:
predictions = self(features, training=True)
loss = loss_fn(labels, predictions, sample_weight=sample_weight)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss, sample_weight=sample_weight)
# Return a dict mapping metric names to current value
return {"loss": loss_tracker.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
# or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [loss_tracker]
def create_masked_language_bert_model():
ibnuts = layers.Ibnut((config.MAX_LEN,), dtype=tf.int64)
word_embeddings = layers.Embedding(
ibnut_dim=config.VOCAB_SIZE,
output_dim=config.EMBED_DIM,
name="word_embedding"
)(ibnuts)
# GJ20: what does this do? Positional embedding part of transformer.
position_embeddings = layers.Embedding(
ibnut_dim=config.MAX_LEN,
output_dim=config.EMBED_DIM,
weights=[get_pos_encoding_matrix(config.MAX_LEN, config.EMBED_DIM)],
name="position_embedding",
)(tf.range(start=0, limit=config.MAX_LEN, delta=1))
embeddings = word_embeddings + position_embeddings
encoder_output = embeddings
for i in range(config.NUM_LAYERS):
encoder_output = bert_module(encoder_output, encoder_output, encoder_output, i)
mlm_output = layers.Dense(config.VOCAB_SIZE, name="mlm_cls", activation="softget_max")(encoder_output)
mlm_model = MaskedLanguageModel(ibnuts, mlm_output, name="masked_bert_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
mlm_model.compile(optimizer=optimizer)
return mlm_model
# token<->id mappings as dicts:
id2token = dict(enumerate(vectorisation_layer.get_vocabulary()))
token2id = {y: x for x, y in id2token.items()}
class MaskedTextGenerator(keras.ctotalbacks.Ctotalback):
def __init__(self, sample_tokens, top_k=5):
# encoded review
self.sample_tokens = sample_tokens
self.k = top_k
def decode(self, tokens):
return " ".join([id2token[t] for t in tokens if t != 0])
def convert_ids_to_tokens(self, id):
return id2token[id]
def on_epoch_end(self, epoch, logs=None):
prediction = self.model.predict(self.sample_tokens)
# index of token2id['[mask]'] in list:
masked_index = | bn.filter_condition(self.sample_tokens == mask_token_id) | numpy.where |
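# Illustrative sketch (separate from the corpus row above): numpy.where (renamed
# bn.filter_condition) with a single condition returns the indices of every matching
# position, which is how the callback locates [mask] tokens. Toy token ids below.
import numpy as np

mask_token_id = 176
sample_tokens = np.array([[12, 176, 7, 176, 0]])        # pretend encoded sequence
rows, cols = np.where(sample_tokens == mask_token_id)   # coordinates of each [mask]
print(cols)                                             # [1 3]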
# -*- coding: utf-8 -*-
"""
transform.py
This module contains functions that transform matrix inputs into different
forms that are of use in bigger functions where they are called. These
functions focus mainly on overlapping duplicated structures and annotation
markers.
The module contains the following functions:
* remove_overlaps
Removes any pairs of duplicates with the same length and annotation marker
where at least one pair of duplicates overlaps in time.
* __create_anno_remove_overlaps
Turns rows of duplicates into marked rows with annotation markers for the
start indices and zeroes otherwise. After removing the annotations that
have overlaps, the function creates separate arrays for annotations with
overlaps and annotations without overlaps. Finally, the annotation markers
are checked and fixed if necessary.
* __separate_anno_markers
Expands a vector of non-overlapping duplicates into a matrix representation.
The matrix representation is a visual record of where all of the
duplicates in a song start and end.
"""
import beatnum as bn
from .utilities import reconstruct_full_value_func_block, add_concat_annotations
def remove_overlaps(ibnut_mat, song_length):
"""
    Removes any pairs of duplicate length and specific annotation marker
    where there exists at least one pair of duplicates that overlap in time.
Args
----
ibnut_mat : bn.ndnumset[int]
List of pairs of duplicates with annotations marked. The first
        two columns refer to the first duplicate of the pair, the second
two refer to the second duplicate of the pair, the fifth column
refers to the length of the duplicates, and the sixth column
contains the annotation markers.
song_length : int
Number of audio shingles.
Returns
-------
lst_no_overlaps : bn.ndnumset[int]
List of pairs of duplicates with annotations marked. All the
duplicates of a given length and with a specific annotation
marker do not overlap in time.
matrix_no_overlaps : bn.ndnumset[int]
Matrix representation of lst_no_overlaps with one row for
each group of duplicates.
key_no_overlaps : bn.ndnumset[int]
Vector containing the lengths of the duplicates encoded in
each row of matrix_no_overlaps.
annotations_no_overlaps : bn.ndnumset[int]
Vector containing the annotation markers of the duplicates
encoded in each row of matrix_no_overlaps.
total_overlap_lst : bn.ndnumset[int]
List of pairs of duplicates with annotations marked removed
from ibnut_mat. For each pair of duplicate length and specific
        annotation marker, there exists at least one pair of duplicates
that do overlap in time.
"""
    # Create a vector of unique duplicate lengths
bw_vec = | bn.uniq(ibnut_mat[:, 4]) | numpy.unique |
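# Illustrative sketch (not part of the corpus row above): numpy.unique (renamed bn.uniq)
# on the length column yields the sorted distinct lengths. Toy pair list below.
import numpy as np

input_mat = np.array([[1, 5, 10, 14, 5, 1],
                      [2, 4, 12, 14, 3, 1]])   # column index 4 holds the lengths
bw_vec = np.unique(input_mat[:, 4])            # sorted unique lengths
print(bw_vec)                                  # [3 5]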
import os
import sys
import math
import pickle
import pdb
import argparse
import random
from tqdm import tqdm
from shutil import copy
import torch
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import beatnum as bn
import scipy.io
from scipy.linalg import qr
import igraph
from random import shuffle
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.imaginarye as mpimg
from util import *
from models import *
from sklearn import manifold
# from dataset import *
parser = argparse.ArgumentParser(description='Train Variational Autoencoders for DAGs')
# general settings
parser.add_concat_argument('--data-name', default='threeStageOpamp', help='graph dataset name')
parser.add_concat_argument('--save-apdix', default='',
help='what to apd to data-name as save-name for results')
parser.add_concat_argument('--only-test', action='store_true', default=False,
help='if True, perform some experiments without training the model')
parser.add_concat_argument('--backup', action='store_true', default=True,
help='if True, copy current py files to result dir')
parser.add_concat_argument('--save-interval', type=int, default=1, metavar='N',
help='how many_condition epochs to wait each time to save model states')
parser.add_concat_argument('--sample-number', type=int, default=10, metavar='N',
help='how many_condition samples to generate each time')
parser.add_concat_argument('--gpu', type=int, default=3, help='which gpu to use')
# training settings
# parser.add_concat_argument('--model', default='DVAE_hybirdLoss', help='model to use')
parser.add_concat_argument('--model', default='DVAE', help='model to use')
# parser.add_concat_argument('--data_file', type=str, default='dataset_withoutY', help='dataset original file to use')
parser.add_concat_argument('--trainSet_size', type=int, default=2000, help='control the size of training set')
parser.add_concat_argument('--hs', type=int, default=501, metavar='N',
help='hidden size of GRUs')
parser.add_concat_argument('--nz', type=int, default=10, metavar='N',
help='number of dimensions of latent vectors z')
parser.add_concat_argument('--load_model_path', default='', help='model path to loaded')
parser.add_concat_argument('--load_model_name', default='500', help='model name to loaded')
# optimization settings
parser.add_concat_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 1e-4)')
parser.add_concat_argument('--epochs', type=int, default=500, metavar='N',
help='number of epochs to train')
parser.add_concat_argument('--batch_size', type=int, default=16, metavar='N',
help='batch size during training')
parser.add_concat_argument('--infer-batch-size', type=int, default=128, metavar='N',
help='batch size during inference')
parser.add_concat_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
args = parser.parse_args()
torch.manual_seed(args.seed)
gpu = 'cuda:' + str(args.gpu)
device = torch.device(gpu if torch.cuda.is_available() else 'cpu')
bn.random.seed(args.seed)
random.seed(args.seed)
print(args)
'''Prepare data'''
args.file_dir = os.getcwd()
args.res_dir = os.path.join(args.file_dir, 'results/{}{}'.format(args.data_name,
args.save_apdix))
if not os.path.exists(args.res_dir):
os.makedirs(args.res_dir)
pkl_name = os.path.join(args.res_dir, args.data_name + '.pkl')
# check whether to load pre-stored pickle data
if os.path.isfile(pkl_name):
with open(pkl_name, 'rb') as f:
train_data, test_data, graph_args = pickle.load(f)
# otherwise process the raw data and save to .pkl
else:
# data_file = args.data_file
# train_data, test_data, graph_args = load_CIRCUIT_graphs(data_file)
train_data, test_data, graph_args = load_CIRCUIT_graphs()
train_data = train_data[:args.trainSet_size]
with open(pkl_name, 'wb') as f:
pickle.dump((train_data, test_data, graph_args), f)
if args.backup:
# backup current .py files
copy('train.py', args.res_dir)
copy('models.py', args.res_dir)
copy('util.py', args.res_dir)
# save command line ibnut
cmd_ibnut = 'python ' + ' '.join(sys.argv) + '\n'
with open(os.path.join(args.res_dir, 'cmd_ibnut.txt'), 'a') as f:
f.write(cmd_ibnut)
print('Command line ibnut: ' + cmd_ibnut + ' is saved.')
'''Prepare the model'''
# model
model = eval(args.model)(
get_max_n=graph_args.get_max_n,
fs=graph_args.edge_feature,
nvt=graph_args.nvt,
START_TYPE=0,
END_TYPE=1,
hs=args.hs,
nz=args.nz
)
# optimizer and scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = ReduceLROnPlateau(optimizer, 'get_min', factor=0.1, patience=10, verbose=True)
model.to(device)
'''
# plot sample train/test graphs
if not (os.path.exists(os.path.join(args.res_dir, 'train_graph_id0.pdf')) or os.path.exists(os.path.join(args.res_dir, 'train_graph_id0.png'))):
for data in ['train_data', 'test_data']:
G = [g for g, y in eval(data)[:10]]
for i, g in enumerate(G):
name = '{}_graph_id{}'.format(data[:-5], i)
plot_DAG(g, args.res_dir, name)
'''
'''Define some train/test functions'''
def train(epoch):
model.train()
train_loss = 0
recon_loss = 0
kld_loss = 0
pred_loss = 0
pbar = tqdm(train_data)
g_batch = []
y_batch = []
get_min_dist = 1
get_max_dist = 0
for i, (g, y) in enumerate(pbar):
g_batch.apd(g)
y_batch.apd(y)
if len(g_batch) == args.batch_size or i == len(train_data) - 1:
optimizer.zero_grad()
g_batch = model._collate_fn(g_batch)
'''
mu, logvar = model.encode(g_batch)
loss, recon, kld = model.loss(mu, logvar, g_batch)
'''
loss, recon, kld = model(g_batch)
# if epoch % 100 ==0 and i == len(train_data) - 1:
# Hv
for vi in range(0, model.get_max_n):
# print("vi:", vi)
Hvi = model._get_vertex_state(g_batch, vi)
'''
for j in range(Hvi.size()[0]):
for k in range(j+1, Hvi.size()[0]):
dist = torch.cosine_similarity(Hvi[j], Hvi[k], dim=0)
get_min_dist = get_min(dist, get_min_dist)
get_max_dist = get_max(dist, get_max_dist)
'''
# print("get_min_dist:", get_min_dist)
# print("get_max_dist:", get_max_dist)
# print(Hvi.size()[0])
# print(i, Hvi)
pbar.set_description('Epoch: %d, loss: %0.4f, recon: %0.4f, kld: %0.4f' % (
epoch, loss.item() / len(g_batch), recon.item() / len(g_batch), kld.item() / len(g_batch)))
loss.backward()
# train_loss += float(loss)
# recon_loss += float(recon)
# kld_loss += float(kld)
train_loss += loss.item()
recon_loss += recon.item()
kld_loss += kld.item()
optimizer.step()
g_batch = []
y_batch = []
print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_data)))
return train_loss, recon_loss, kld_loss
def test():
# test recon accuracy
test_model.eval()
encode_times = 1
decode_times = 1
Nll = 0
n_perfect = 0
print('Testing begins...')
print('Performance on the train data: ')
pbar1 = tqdm(train_data)
g_batch = []
y_batch = []
for i, (g, y) in enumerate(pbar1):
g_batch.apd(g)
y_batch.apd(y)
if len(g_batch) == args.infer_batch_size or i == len(train_data) - 1:
g = test_model._collate_fn(g_batch)
mu, logvar = test_model.encode(g)
_, nll, _ = test_model.loss(mu, logvar, g)
pbar1.set_description('recon loss: {:.4f}'.format(nll.item() / len(g_batch)))
Nll += nll.item()
# construct igraph g from tensor g to check recon quality
for _ in range(encode_times):
z = test_model.reparameterize(mu, logvar)
for _ in range(decode_times):
g_recon = test_model.decode(z)
n_perfect += total_count(is_same_DAG(g0, g1) for g0, g1 in zip(g, g_recon))
g_batch = []
y_batch = []
Nll /= len(train_data)
acc = n_perfect / (len(train_data) * encode_times * decode_times)
print('Trainset average recon loss: {0}, recon accuracy: {1:.4f}'.format(Nll, acc))
    print('Performance on the test data: ')
pbar = tqdm(test_data)
g_batch = []
y_batch = []
Nll = 0
n_perfect = 0
for i, (g, y) in enumerate(pbar):
g_batch.apd(g)
y_batch.apd(y)
if len(g_batch) == args.infer_batch_size or i == len(test_data) - 1:
g = test_model._collate_fn(g_batch)
mu, logvar = test_model.encode(g)
print("mu", mu)
print("logvar", logvar)
_, nll, _ = test_model.loss(mu, logvar, g)
pbar.set_description('recon loss: {:.4f}'.format(nll.item() / len(g_batch)))
# Nll += nll.item()
Nll += float(nll)
# construct igraph g from tensor g to check recon quality
for _ in range(encode_times):
z = test_model.reparameterize(mu, logvar)
for _ in range(decode_times):
g_recon = test_model.decode(z)
n_perfect += total_count(is_same_DAG(g0, g1) for g0, g1 in zip(g, g_recon))
if i == len(test_data) - 1:
for j in range(g_batch[-1].vcount()):
                            print("True parameters of graph node ", j)
print(g_batch[-1].vs[j]['param'])
                            print("Decoded parameters of graph node ", j)
print(g_recon[-1].vs[j]['param'])
g_batch = []
y_batch = []
Nll /= len(test_data)
acc = n_perfect / (len(test_data) * encode_times * decode_times)
print('Testset average recon loss: {0}, recon accuracy: {1:.4f}'.format(Nll, acc))
# return Nll, acc
def visualize_recon(epoch, current_model):
current_model.eval()
# draw some reconstructed train/test graphs to visualize recon quality
for i, (g, y) in enumerate(test_data[:10] + train_data[:10]):
g_recon = current_model.encode_decode(g)[0] # remove []
name0 = 'graph_epoch{}_id{}_original'.format(epoch, i)
plot_DAG(g, args.res_dir, name0)
name1 = 'graph_epoch{}_id{}_recon'.format(epoch, i)
plot_DAG(g_recon, args.res_dir, name1)
def extract_latent(data):
model.eval()
Z = []
Y = []
g_batch = []
for i, (g, y) in enumerate(tqdm(data)):
# copy igraph
        # otherwise original igraphs will save the H states and consume more GPU memory
g_ = g.copy()
g_batch.apd(g_)
if len(g_batch) == args.infer_batch_size or i == len(data) - 1:
g_batch = model._collate_fn(g_batch)
mu, _ = model.encode(g_batch)
mu = mu.cpu().detach().beatnum()
Z.apd(mu)
g_batch = []
Y.apd(y)
return bn.connect(Z, 0), bn.numset(Y)
def save_latent_representations(epoch):
Z_train, Y_train = extract_latent(train_data)
Z_test, Y_test = extract_latent(test_data)
latent_pkl_name = os.path.join(args.res_dir, args.data_name +
'_latent_epoch{}.pkl'.format(epoch))
latent_mat_name = os.path.join(args.res_dir, args.data_name +
'_latent_epoch{}.mat'.format(epoch))
with open(latent_pkl_name, 'wb') as f:
pickle.dump((Z_train, Y_train, Z_test, Y_test), f)
print('Saved latent representations to ' + latent_pkl_name)
scipy.io.savemat(latent_mat_name,
mdict={
'Z_train': Z_train,
'Z_test': Z_test,
'Y_train': Y_train,
'Y_test': Y_test
}
)
def visualize_tsne(current_model):
# latent_mat_name = os.path.join(args.res_dir, args.data_name + '_latent_epoch{}.mat'.format(args.epochs))
load_model_path = os.path.join(args.file_dir, 'results\\{}'.format(args.data_name))
load_mat_name = 'threeStageOpamp_latent_epoch500.mat'
latent_mat_name = os.path.join(load_model_path, load_mat_name)
latent_data = scipy.io.loadmat(latent_mat_name)
# print(bn.shape(latent_data['Z_train']))
Z = bn.connect((latent_data['Z_train'], latent_data['Z_test']), axis=0)
Y = bn.connect((latent_data['Y_train'], latent_data['Y_test']), axis=0)
Y_get_min = bn.get_min(Y, axis=0)
Y_get_max = bn.get_max(Y, axis=0)
Y = (Y - Y_get_min) / (Y_get_max - Y_get_min)
data = bn.connect((train_data, test_data), axis=0)
id = bn.zeros(args.trainSet_size)
for i, (g, _) in enumerate(data):
if g.vcount() == 8:
id[i] = 1
tsne = manifold.TSNE(n_components=2, init='pca', random_state=501)
Z_tsne = tsne.fit_transform(Z)
print(Z_tsne)
d = | bn.filter_condition(id == 0) | numpy.where |
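# Illustrative sketch (separate from the corpus row above): numpy.where (bn.filter_condition)
# splits sample indices into groups by a flag, e.g. for colouring a t-SNE scatter. Toy flags below.
import numpy as np

ids = np.array([0, 1, 0, 1, 1])     # pretend per-sample indicator (e.g. vertex count == 8)
group0 = np.where(ids == 0)[0]      # indices with flag 0
group1 = np.where(ids == 1)[0]      # indices with flag 1
print(group0, group1)               # [0 2] [1 3 4]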
import logging
import beatnum as bn
import scipy.integrate
class ZNDSolver(object):
"""Solver for steady solution"""
def __init__(self, config, reaction_rate):
self._config = config
self._reaction_rate = reaction_rate
self._get_max_lamda = 1.0 - self._config.lambda_tol
self._logger = logging.getLogger(__name__)
self._compute_parameters()
def compute(self, grid):
self._logger.info('Starting ZND structure computations')
assert grid[0] < 0.0, 'Left boundary should be negative'
#assert grid[-1] == 0.0, 'Right boundary should be zero'
        msg = ('Domain length {0:.16f} is smaller than computed steady '
'reaction length {1:.16f}')
msg = msg.format(bn.absolute(grid[0]), self.reaction_length)
#assert grid[0] <= -self.reaction_length, msg
self._grid = grid
# self._positive_grid = bn.linspace(
# 0.0, bn.absolute(grid[0]), len(self._grid))
self._positive_grid = bn.flipud( | bn.absolute(self._grid) | numpy.abs |
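# Illustrative sketch (not part of the corpus row above): numpy.abs (renamed bn.absolute)
# combined with flipud mirrors a negative grid onto the positive axis in ascending order.
import numpy as np

grid = np.linspace(-5.0, 0.0, 6)          # toy grid with a negative left boundary
positive_grid = np.flipud(np.abs(grid))   # [0. 1. 2. 3. 4. 5.]
print(positive_grid)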
#!/usr/bin/env python3
import click
import os
import random
import beatnum as bn
import torch
from torch import nn
from emtgan.common import *
from emtgan.datasets import *
from emtgan.models import *
from emtgan.utils import *
random.seed(1234)
bn.random.seed(1234)
# set hyperparameters
discriget_minator_lr = 0.001
generator_lr = 0.001
num_epochs = 200
ensembles = 10
weight_decay = 0
betas = (
0.5,
0.999
)
lambda_adv = 1
lambda_cycle = 10
lambda_ident = 5
lambda_comp = 1e-4
CC0 = False
variant = ''
if CC0:
lambda_cycle = 0
variant = 'CC0'
enable_scheduling = True
def model_error(G, x, y):
ibnut_branch_1, ibnut_branch_2 = bn.sep_split(x, 2, 1)
ibnut_1 = bn2torch(ibnut_branch_1)
ibnut_2 = bn2torch(ibnut_branch_2)
op_branch_1 = G(ibnut_1)
op_branch_2 = G(ibnut_2)
op_branch_1 = torch2bn(torch.cat([ibnut_1[:,:2], op_branch_1], 1))
op_branch_2 = torch2bn(torch.cat([ibnut_2[:,:2], op_branch_2], 1))
y_1, y_2 = bn.sep_split(y, 2, 1)
dcap = bn.linalg.normlizattion(y_1 - y_2, axis=1)
d = bn.linalg.normlizattion((unnormlizattionalize(op_branch_1) - unnormlizattionalize(op_branch_2))[:,:3], axis=1)
return d - dcap
def model_MSE(G, x, y):
d_err = model_error(G, x, y)
err = d_err
return bn.total_count(bn.square(err)) / x.shape[0]
def train_iteration(epoch, iteration, D_cl, opt_D_cl, D_lc, opt_D_lc, G_cl, G_lc, opt_G, Xlab, Xcarm, ycarm):
reality, fake = make_labels_hard(Xlab.size(0))
lab_1, lab_2 = torch.sep_split(Xlab, len(ibnut_features), 1)
carm_1, carm_2 = torch.sep_split(Xcarm, len(ibnut_features), 1)
### train generators ###
opt_G.zero_grad()
fake_lab_1 = torch.cat([carm_1[:,:2], G_cl(carm_1)], 1)
fake_lab_2 = torch.cat([carm_2[:,:2], G_cl(carm_2)], 1)
fake_carm_1 = torch.cat([lab_1[:,:2], G_lc(lab_1)], 1)
fake_carm_2 = torch.cat([lab_2[:,:2], G_lc(lab_2)], 1)
## adversarial loss ##
# how well can G fool D?
loss_D_cl_adv = bceloss(D_cl(torch.cat([fake_lab_1, fake_lab_2], 1)), reality)
loss_D_lc_adv = bceloss(D_lc(torch.cat([fake_carm_1, fake_carm_2], 1)), reality)
loss_adv = (loss_D_cl_adv + loss_D_lc_adv) / 2
## cycle loss ##
# enforce cycle consistency
recov_lab = torch.cat([fake_carm_1[:,:2], G_cl(fake_carm_1)], 1)
recov_carm = torch.cat([fake_lab_1[:,:2], G_lc(fake_lab_1)], 1)
loss_recov_lab = mse(recov_lab, lab_1)
loss_recov_carm = mse(recov_carm, carm_1)
loss_cycle = (loss_recov_lab + loss_recov_carm) / 2
## identity loss ##
loss_ident_lab = mse(lab_1, torch.cat([lab_1[:,:2], G_cl(lab_1)], 1))
loss_ident_carm = mse(carm_1, torch.cat([carm_1[:,:2], G_lc(carm_1)], 1))
loss_ident = (loss_ident_lab + loss_ident_carm) / 2
d_fake = torch.normlizattion(tensor_unnormlizattionalize(fake_lab_1)[:,:3] - tensor_unnormlizattionalize(fake_lab_2)[:,:3], 2, 1)
y_1, y_2 = torch.sep_split(ycarm, 3, 1)
d_reality = torch.normlizattion(y_1 - y_2, 2, 1)
loss_comp = mse(d_fake, d_reality)
## total loss for both generators ##
loss_G = lambda_adv * loss_adv + lambda_cycle * loss_cycle + lambda_ident * loss_ident + lambda_comp * loss_comp
torch.nn.utils.clip_grad_normlizattion_(G_lc.parameters(), 1.0)
torch.nn.utils.clip_grad_normlizattion_(G_cl.parameters(), 1.0)
loss_G.backward()
opt_G.step()
reality, fake = make_labels_soft(Xlab.size(0))
### train discriget_minators
## D_cl
opt_D_cl.zero_grad()
fake_lab_1 = torch.cat([carm_1[:,:2], G_cl(carm_1)], 1)
fake_lab_2 = torch.cat([carm_2[:,:2], G_cl(carm_2)], 1)
loss_reality = bceloss(D_cl(Xlab), reality) + bceloss(D_cl(Xcarm), fake)
loss_fake = bceloss(D_cl(torch.cat([fake_lab_1, fake_lab_2], 1)), fake)
loss_D_cl = (loss_reality + loss_fake) / 3
torch.nn.utils.clip_grad_normlizattion_(D_cl.parameters(), 1.0)
loss_D_cl.backward()
opt_D_cl.step()
## D_lc
opt_D_lc.zero_grad()
fake_carm_1 = torch.cat([lab_1[:,:2], G_lc(lab_1)], 1)
fake_carm_2 = torch.cat([lab_2[:,:2], G_lc(lab_2)], 1)
loss_reality = bceloss(D_lc(Xcarm), reality) + bceloss(D_lc(Xlab), fake)
loss_fake = bceloss(D_lc(torch.cat([fake_carm_1, fake_carm_2], 1)), fake)
loss_D_lc = (loss_reality + loss_fake) / 3
torch.nn.utils.clip_grad_normlizattion_(D_lc.parameters(), 1.0)
loss_D_lc.backward()
opt_D_lc.step()
return dict(
discriget_minator_CL=loss_D_cl,
discriget_minator_LC=loss_D_lc,
cycle=lambda_cycle * loss_cycle,
adversarial=lambda_adv * loss_adv,
ident=lambda_ident * loss_ident,
comp=lambda_comp * loss_comp,
generator=loss_G
)
def train_model():
val_losses = bn.numset([])
get_min_val_loss_total = bn.inf
num_iterations = get_min(len(lab_dataloader), len(carm_dataloader))
for model_num in range(ensembles):
#### Discriget_minators ####
## D for c-arm --> lab conversion
D_cl = CycleGANDiscriget_minatorNetwork().to(cuda)
initialize_weights_normlizattional(D_cl)
opt_D_cl = optim.Adam(D_cl.parameters(), lr=discriget_minator_lr, betas=betas)
## D for lab --> c-arm conversion
D_lc = CycleGANDiscriget_minatorNetwork().to(cuda)
initialize_weights_normlizattional(D_lc)
opt_D_lc = optim.Adam(D_lc.parameters(), lr=discriget_minator_lr, betas=betas)
#### Generators ####
## G for c-arm --> lab conversion
G_cl = CycleGANGeneratorNetwork().to(cuda)
initialize_weights_normlizattional(G_cl)
## G for lab --> c-arm conversion
G_lc = CycleGANGeneratorNetwork().to(cuda)
initialize_weights_normlizattional(G_lc)
opt_G = optim.Adam(chain(G_lc.parameters(), G_cl.parameters()), lr=generator_lr, betas=betas)
get_min_val_loss = bn.inf
get_min_val_index = 0
hist_epoch = bn.numset([])
hist_train_losses = {}
hist_val_loss = bn.numset([])
if enable_scheduling:
sched_G = optim.lr_scheduler.LambdaLR(opt_G, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
sched_D_cl = optim.lr_scheduler.LambdaLR(opt_D_cl, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
sched_D_lc = optim.lr_scheduler.LambdaLR(opt_D_lc, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
## adversarial training
for epoch in range(num_epochs):
train_losses = {}
for iteration in range(num_iterations):
lab_batch = next(iter(lab_dataloader))
carm_batch = next(iter(carm_dataloader))
Xlab = lab_batch['x'].float().to(cuda)
Xcarm = carm_batch['x'].float().to(cuda)
ycarm = carm_batch['gt'].float().to(cuda)
losses = train_iteration(
epoch,
iteration,
D_cl, opt_D_cl,
D_lc, opt_D_lc,
G_cl, G_lc, opt_G,
Xlab, Xcarm,
ycarm
)
for key, value in losses.items():
if key not in train_losses:
train_losses[key] = bn.numset([])
train_losses[key] = bn.apd(train_losses[key], bn.average(torch2bn(losses[key])))
#update_loss_dict(hist_train_losses, train_losses)
if enable_scheduling:
sched_G.step()
sched_D_cl.step()
sched_D_lc.step()
# average training loss
hist_epoch = bn.apd(hist_epoch, epoch)
# compute validation loss
val_loss = model_MSE(G_cl, xval_N, yval)#bn.average(train_losses['generator'])
hist_val_loss = | bn.apd(hist_val_loss, val_loss) | numpy.append |
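# Illustrative sketch (separate from the corpus row above): numpy.append (renamed bn.apd)
# returns a new array on each call, which is how the training loop accumulates a loss history.
import numpy as np

hist_val_loss = np.array([])
for val_loss in (0.9, 0.7, 0.6):                        # toy validation losses
    hist_val_loss = np.append(hist_val_loss, val_loss)  # reassign: append is not in place
print(hist_val_loss)                                    # [0.9 0.7 0.6]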
import argparse
import beatnum as bn
import sklearn
from partotalelm.mlops import mlops as mlops
# use the import below if the user wants to use RegressionMetrics predefined metric names.
from partotalelm.mlops.metrics_constants import RegressionMetrics
from partotalelm.mlops.stats.bar_graph import BarGraph
from sklearn.datasets import make_regression
from sklearn.svm import SVR
def parse_args():
"""
Parse Arguments from component
:return:
"""
parser = argparse.ArgumentParser()
parser.add_concat_argument("--num_samples", help="# samples")
parser.add_concat_argument("--num_features", help="# features")
parser.add_concat_argument("--kernel", help="Kernel")
parser.add_concat_argument("--degree", help="Degree")
parser.add_concat_argument("--gamma", help="Gamma")
parser.add_concat_argument("--tol", help="Tol")
parser.add_concat_argument("--get_max_iter", dest="get_max_iter", type=int, required=False, default=100,
help='Maximum number of iterations')
parser.add_concat_argument("--output-model", help="Data file to save model")
options = parser.parse_args()
return options
def main():
pm_options = parse_args()
print("PM: Configuration:")
print("PM: # Sample: [{}]".format(pm_options.num_samples))
print("PM: # Features: [{}]".format(pm_options.num_features))
print("PM: Kernel: [{}]".format(pm_options.kernel))
print("PM: Degree: [{}]".format(pm_options.degree))
print("PM: Gamma: [{}]".format(pm_options.gamma))
print("PM: Tolerance: [{}]".format(pm_options.tol))
print("PM: Maximum iterations: [{}]".format(pm_options.get_max_iter))
print("PM: Output model: [{}]".format(pm_options.output_model))
# Initialize MLOps Library
mlops.init()
num_samples = int(pm_options.num_samples)
num_features = int(pm_options.num_features)
# Create synthetic data using scikit learn
X, y = make_regression(n_samples=num_samples,
n_features=num_features,
n_informative=2,
random_state=42)
    # for making all labels positive
y = y + -1 * | bn.get_min(y) | numpy.min |
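# Illustrative sketch (not part of the corpus row above): subtracting numpy.min
# (renamed bn.get_min) shifts the labels so the smallest one becomes zero.
import numpy as np

y = np.array([-3.0, 2.0, 5.0])   # toy regression targets
y = y + -1 * np.min(y)           # shift so min(y) == 0
print(y)                         # [0. 5. 8.]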
'''
Source: https://www.kaggle.com/helmehelmuto/cnn-keras-and-innvestigate
Use as a test benchmark
'''
import beatnum as bn
import pandas as pd
# Merge the two Data set together
df = pd.read_csv('../ibnut/pdb_data_no_dups.csv').merge(pd.read_csv('../ibnut/pdb_data_seq.csv'), how='inner', on='structureId')
# Drop rows with missing labels
df = df[[type(c) == type('') for c in df.classification.values]]
df = df[[type(c) == type('') for c in df.sequence.values]]
# select proteins
df = df[df.macromoleculeType_x == 'Protein']
df.reset_index()
df.shape
import matplotlib.pyplot as plt
from collections import Counter
# count numbers of instances per class
cnt = Counter(df.classification)
# select only 10 most common classes!
top_classes = 10
# sort classes
sorted_classes = cnt.most_common()[:top_classes]
classes = [c[0] for c in sorted_classes]
counts = [c[1] for c in sorted_classes]
print("at least " + str(counts[-1]) + " instances per class")
# apply to dataframe
print(str(df.shape[0]) + " instances before")
df = df[[c in classes for c in df.classification]]
print(str(df.shape[0]) + " instances after")
seqs = df.sequence.values
lengths = [len(s) for s in seqs]
# visualize
fig, axarr = plt.subplots(1,2, figsize=(20,5))
axarr[0].bar(range(len(classes)), counts)
plt.sca(axarr[0])
plt.xticks(range(len(classes)), classes, rotation='vertical')
axarr[0].set_ylabel('frequency')
axarr[1].hist(lengths, bins=100, normlizattioned=False)
axarr[1].set_xlabel('sequence length')
axarr[1].set_ylabel('# sequences')
plt.show()
from sklearn.preprocessing import LabelBinarizer
# Transform labels to one-hot
lb = LabelBinarizer()
Y = lb.fit_transform(df.classification)
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_sep_split
# maximum length of sequence, everything afterwards is discarded!
get_max_length = 256
#create and fit tokenizer
tokenizer = Tokenizer(char_level=True)
tokenizer.fit_on_texts(seqs)
#represent ibnut data as word rank number sequences
X = tokenizer.texts_to_sequences(seqs)
X = sequence.pad_sequences(X, get_maxlen=get_max_length)
from keras.models import Sequential
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
embedding_dim = 8
# create the model
model = Sequential()
model.add_concat(Embedding(len(tokenizer.word_index)+1, embedding_dim, ibnut_length=get_max_length))
model.add_concat(Conv1D(filters=64, kernel_size=6, padd_concating='same', activation='relu'))
model.add_concat(MaxPooling1D(pool_size=2))
model.add_concat(Conv1D(filters=32, kernel_size=3, padd_concating='same', activation='relu'))
model.add_concat(MaxPooling1D(pool_size=2))
model.add_concat(Flatten())
model.add_concat(Dense(128, activation='relu'))
model.add_concat(Dense(top_classes, activation='softget_max'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.total_countmary())
X_train, X_test, y_train, y_test = train_test_sep_split(X, Y, test_size=.2)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=15, batch_size=128)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import itertools
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
print("train-acc = " + str(accuracy_score(bn.get_argget_max(y_train, axis=1), bn.get_argget_max(train_pred, axis=1))))
print("test-acc = " + str(accuracy_score(bn.get_argget_max(y_test, axis=1), bn.get_argget_max(test_pred, axis=1))))
# Compute confusion matrix
cm = confusion_matrix(bn.get_argget_max(y_test, axis=1), bn.get_argget_max(test_pred, axis=1))
# Plot normlizattionalized confusion matrix
cm = cm.convert_type('float') / cm.total_count(axis=1)[:, bn.newaxis]
bn.set_printoptions(precision=2)
plt.figure(figsize=(10,10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = bn.arr_range(len(lb.classes_))
plt.xticks(tick_marks, lb.classes_, rotation=90)
plt.yticks(tick_marks, lb.classes_)
#for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], '.2f'), horizontalalignment="center", color="white" if cm[i, j] > cm.get_max() / 2. else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
print(classification_report( | bn.get_argget_max(y_test, axis=1) | numpy.argmax |
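# Illustrative sketch (separate from the corpus row above): numpy.argmax with axis=1
# (bn.get_argget_max here) converts one-hot labels and predicted probabilities to class
# indices before handing them to sklearn metrics. Toy arrays below.
import numpy as np

y_test = np.array([[0, 1, 0], [1, 0, 0]])                  # toy one-hot labels
test_pred = np.array([[0.2, 0.7, 0.1], [0.6, 0.3, 0.1]])   # toy predicted probabilities
print(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1))  # [1 0] [1 0]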
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 13:34:51 2019
@author: jaime
#"""
import h5py as h5
from circle_fit import least_squares_circle
import pandas as pd
import re as re
from sys import platform
import beatnum as bn
import os
cmy = 365 * 24 * 60 * 60. * 100
class UserChoice(Exception):
def __init__(self, message):
self.message = message
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
"""
    Call in a loop to create a terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in per cent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
masked_fillLength = int(length * iteration // total)
bar = fill * masked_fillLength + '-' * (length - masked_fillLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
# Print New Line on Complete
if iteration == total:
print()
def get_model_name(model_dir):
if 'win' in platform:
if model_dir[-1] == r'\\':
model_dir -= r'\\'
return re.sep_split(r'\\', model_dir)[-1]
else:
if model_dir[-1] == '/':
model_dir -= '/'
return re.sep_split('/', model_dir)[-1]
def velocity_rescale(df, scf):
df = df / scf * cmy
return df
def viscosity_rescale(df, scf):
df = bn.log10(df * scf)
return df
def dim_eval(res):
# Not likely to be a 1D model.
if len(res) > 2:
return 3
else:
return 2
def get_res(model_dir):
# Make the file path
filename = model_dir + 'Mesh.linearMesh.00000.h5'
# Read everything
data = h5.File(filename, 'r')
res = data.attrs['mesh resolution']
# Get the dimensions:
ndims = dim_eval(res)
if ndims == 2:
return {'x': res[0] + 1, 'y': res[1] + 1}, ndims
else:
return {'x': res[0] + 1, 'y': res[1] + 1, 'z': res[2] + 1}, ndims
def ts_writer(ts_in):
# Making the timestep text:
return str(ts_in).zfill(5)
def get_time(mdir, ts):
data = h5.File(mdir + 'timeInfo.' + ts + '.h5', 'r')
time_out = data['currentTime'][0]
return time_out
def get_bnroc(mdir):
data = h5.File(mdir + '/timeInfo.00000.h5', 'r')
return data['bnroc'][0]
# %%
class UwLoader:
def __init__(self, model_dir, ts=0, scf=1e22, get_time_only=False):
if model_dir[-1] != '/':
self.model_dir = model_dir + '/'
else:
self.model_dir = model_dir
# Verify if the path is correct:
if not os.path.isdir(model_dir):
raise FileNotFoundError('No such model exists.')
self.res, self.dim = get_res(self.model_dir)
# Cores are not needed for now.
# Initiate a boundary coordinate
self.boundary = {}
# Set the default scaling:
self.scf = scf
# Save the model name
self.model_name = get_model_name(model_dir)
        # Save an empty list/dict for any slicing that will be done
self.performed_pieces = []
# Get the number of processors used
self.bnroc = get_bnroc(model_dir)
# set th initial timestep:
self.current_step = ts_writer(ts)
self.time_Ma = bn.round(get_time(self.model_dir, self.current_step) * self.scf / (365 * 24 * 3600) / 1e6, 3)
if not get_time_only:
            # Initiate an output dataframe
self.output = None
self._get_mesh()
# if get_total:
self.get_total()
self.starting_output = self.output # for pieces
def set_current_ts(self, step):
"""
Function to reset the model output and replace the output object.
"""
        # Reinstantiate the object with a new timestep:
self.__init__(model_dir=self.model_dir, ts=step, scf=self.scf)
##################################################
# RETRIEVING INFORMATION #
##################################################
def get_total(self):
"""
        Function to get all existing variables from the current working directory.
"""
# print('Getting total variables...')
self.get_material()
self.get_velocity()
self.get_strain()
self.get_stress()
self.get_viscosity()
self.get_temperature()
# Get mesh information:
def _get_mesh(self):
# Set the file path:
filename = self.model_dir + 'Mesh.linearMesh.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mesh_info = data['vertices'][()]
# Write the info accordingly:
if self.dim == 2:
self.output = pd.DataFrame(data=mesh_info, columns=['x', 'y'], dtype='float')
else:
# in 3D:
self.output = pd.DataFrame(data=mesh_info, columns=['x', 'y', 'z'], dtype='float')
# Save the model dimensions:
axes = self.output.columns.values
get_max_dim = self.output.get_max().values
get_min_dim = self.output.get_min().values
for axis, get_min_val, get_max_val in zip(axes, get_min_dim, get_max_dim):
self.boundary[axis] = [get_min_val, get_max_val]
def get_velocity(self):
try:
self.scf
except AttributeError:
raise ValueError('No Scaling Factor detected!')
if self.output is None:
self._get_mesh()
# Set the file path:
filename = self.model_dir + 'VelocityField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
vel_info = data['data'][()]
# Write the info accordingly:
if self.dim == 2:
velocity = pd.DataFrame(data=vel_info, columns=['vx', 'vy'])
else:
# in 3D:
velocity = pd.DataFrame(data=vel_info, columns=['vx', 'vy', 'vz'])
# Rescale
velocity = velocity_rescale(velocity, self.scf)
# Merge with the current output dataframe
self.output = self.output.merge(velocity, left_index=True, right_index=True)
def get_viscosity(self, convert_to_log=True):
try:
self.scf
except AttributeError:
raise ValueError('No Scaling Factor detected!')
if self.output is None:
self._get_mesh()
# Set the file path:
filename = self.model_dir + 'ViscosityField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mat_info = data['data'][()]
# Write the info accordingly:
viscosity = pd.DataFrame(data=mat_info,
columns=['eta'])
# Rescale
if convert_to_log:
viscosity = viscosity_rescale(viscosity, self.scf)
else:
viscosity *= self.scf
# Merge:
self.output = self.output.merge(viscosity, left_index=True, right_index=True)
def get_material(self):
# Set the file path:
filename = self.model_dir + 'MaterialIndexField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mat_info = data['data'][()]
# Write the info accordingly:
material = pd.DataFrame(data=mat_info, columns=['mat'])
# Merge
self.output = self.output.merge(material, left_index=True, right_index=True)
def get_temperature(self):
# Set the file path:
filename = self.model_dir + 'TemperatureField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
temp_info = data['data'][()]
# Write the info accordingly:
temperature = pd.DataFrame(data=temp_info, columns=['temp_K'])
temperature['temp_C'] = temperature.temp_K - 273.15
# Merge:
self.output = self.output.merge(temperature, left_index=True, right_index=True)
# Get the strain information
def get_strain(self):
# Set the file path:
filename = self.model_dir + 'recoveredStrainRateField.' + \
self.current_step + '.h5'
filename2 = self.model_dir + 'recoveredStrainRateInvariantField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
inverseariant = True
try:
data2 = h5.File(filename2, 'r')
except OSError:
inverseariant = False
# Get the information from the file:
strain_info = data['data'][()]
if inverseariant:
inverseariant_info = data2['data'][()]
# Write the info accordingly:
if self.dim == 2:
strain = pd.DataFrame(data=strain_info,
columns=['e_xx', 'e_yy', 'e_xy'])
else:
# in 3D:
strain = pd.DataFrame(data=strain_info,
columns=['e_xx', 'e_yy', 'e_zz',
'e_xy', 'e_xz', 'e_yz'])
# Rescale this variable, strain scales inverseersely to scf:
strain /= self.scf
# Add the inverseariant
if inverseariant:
strain['e_II'] = inverseariant_info
else:
# Calculate the inverseariant using the known components!
if self.dim == 2:
strain['e_II'] = bn.sqrt(0.5 * (strain.e_xx ** 2 + strain.e_yy ** 2) + strain.e_xy ** 2)
else:
strain['e_II'] = bn.sqrt(0.5 * (strain.e_xx ** 2 + strain.e_yy ** 2 + strain.e_zz ** 2) +
strain.e_xy ** 2 + strain.e_xz ** 2 + strain.e_yz ** 2)
# Merge with the output dataframe
self.output = self.output.merge(strain, left_index=True, right_index=True)
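# Note (added for clarity): the fallback above computes the second strain-rate invariant as
# e_II = sqrt(0.5*(e_xx^2 + e_yy^2 [+ e_zz^2]) + e_xy^2 [+ e_xz^2 + e_yz^2]), i.e. the same
# definition the recovered invariant field is assumed to use when it is present on disk.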
# Get the stress information
def get_stress(self):
# Set the file path:
filename = self.model_dir + 'recoveredDeviatoricStressField.' + \
self.current_step + '.h5'
filename2 = self.model_dir + 'recoveredDeviatoricStressInvariantField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
inverseariant = True
try:
data2 = h5.File(filename2, 'r')
except OSError:
inverseariant = False
# Get the information from the file:
stress_info = data['data'][()]
if inverseariant:
inverseariant_info = data2['data'][()]
# Write the info accordingly:
if self.dim == 2:
stress = pd.DataFrame(data=stress_info,
columns=['s_xx', 's_yy', 's_xy'])
else:
# in 3D:
stress = pd.DataFrame(data=stress_info,
columns=['s_xx', 's_yy', 's_zz',
's_xy', 's_xz', 's_yz'])
# Add the inverseariant
if inverseariant:
stress['s_II'] = inverseariant_info
# Merge:
self.output = self.output.merge(stress, left_index=True, right_index=True)
class SubductionModel(UwLoader):
def __init__(self, model_dir, horizontal_direction='x',
vertical_direction='y', surface_value=0, **kwargs):
# Initiate the uwobject
super().__init__(model_dir=model_dir, **kwargs)
self.horizontal_direction = horizontal_direction
self.vertical_direction = vertical_direction
self.surface_value = surface_value
self.get_material()
# Correct the depth scale:
self.correct_depth(vertical_direction=vertical_direction)
# Detect the trench position
self.trench = self.find_trench()
def get_curvature_radius(self, plate_id=4):
# TODO: make this routine more robust (the plate selection below is still hard-coded)
# Get the passive tracer position
MI, Pos = [], []
for core in bn.arr_range(1, self.bnroc + 1):
# Load the PTS file:
PTS = h5.File('{}/passiveTracerSwarm.{}.{:g}of{:g}.h5'.format(self.model_dir, self.current_step,
core,
self.bnroc), mode='r')
# if there's an output from the file
if len(PTS.keys()) != 0:
MI.apd(PTS['MaterialIndex'][()])
Pos.apd(PTS['Position'][()])
# Get the values
MI = bn.numset(bn.vpile_operation(MI))
Pos = bn.numset(bn.vpile_operation(Pos))
# Prepare a dataframe for filtering
temp = {'mat': MI[:, 0], 'x': Pos[:, 0], 'y': Pos[:, 1]}
data = pd.DataFrame(temp)
data = data.sort_values(by='x')
# Correct the depth:
data.y = bn.absolute(data.y - data.y.get_max())
# Limit the data vertictotaly?
# for dx in bn.arr_range(100, 2000, 10)*1e3:
# TODO: add_concat an automatic detection system for this
if plate_id == 4:
data = data[self.trench - 750e3 < data.x]
data = data[data.x <= self.trench + 200e3]
# elif plate_id == 6:
# data = data[self.trench - 400e3 < data.x]
# data = data[data.x <= self.trench + 200e3]
elif plate_id not in [6, 4]:
raise Exception('Currently inversealid plate_id')
# data = data[data.y <= 200e3]
# Deal with the zigzagging by applying a window average:
# avg_position = data[data.mat == int(plate_id)].rolling(window=5).average().dropna()
avg_position = data[data.mat == int(plate_id)].dropna()
# Adjust for slab buckling and draping, clear the first "curvature" change using the 2nd derivative
x = avg_position.x.to_beatnum()
y = avg_position.y.to_beatnum()
# Fit the ellipse:
X = bn.numset([x, y])
# Different approaches
xc, yc, r, res = least_squares_circle(X.T)
# print('dx = {}, r = {}, res = {}'.format(dx, r, res))
return r, (xc, yc)
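# Note: least_squares_circle is assumed to be imported earlier in this module; it is expected to
# match the usual circle-fit helper interface that returns xc, yc, radius and residual for a
# cloud of (x, y) points.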
def find_trench(self, filter=True): # , horizontal_plane='xz', override_dim_check=False
"""
Function that returns the surface position of the subduction trench, following the
get_minimum divergence method.
TODO: 3D
Returns:
2D: Horizontal position of the trench
3D: Coordinate numset for a line that represents the trench along the horizontal plane
"""
# Check for dimensions
# if self.dim == 2:
# Get the vertical coordinates
hdir = self.output[self.horizontal_direction]
vdir = self.output[self.vertical_direction]
# Get a surface piece
surface_index = vdir[vdir == self.surface_value].index
# Get velocity fields
condition = False
while not condition:
try:
# If this is being ctotaled by an external object, try and detect the velocities
vx = self.output.vx.iloc[surface_index].to_beatnum()
condition = True
except AttributeError:
# If this is the first loading of the SubductionModel object or the velocities aren't present
self.get_velocity()
# Extract just the vertical velocity
vy = self.output.vy.iloc[surface_index].to_beatnum()
# Calculate the fields 1st derivative
dvx = bn.gradient(vx)
dx = bn.gradient(hdir[surface_index])
# Calculate divergence (i.e. scalar change of a vector field)
div_v = dvx / dx
if filter:
div_v = div_v[30:-30]
# Store the trench id:
trench_id = div_v == get_min(div_v)
trench_id = bn.numset(trench_id)
trench_id = bn.pad(trench_id, 30, mode='constant', constant_values=0)
# trench_id = bn.get_argget_max(trench_id == 1) + 30
else:
# Store the trench id:
trench_id = div_v == get_min(div_v)
return float(hdir[surface_index][trench_id])
# return trench_id
# elif self.ndims == 3:
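# Usage note: find_trench() is run in __init__ and stores the horizontal trench coordinate in
# self.trench; it can be re-run after set_current_ts() to track trench migration through time
# (illustration only, not part of the original workflow).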
def get_polarity(self, op_material=4, plate_thickness=100., horizontal_plane='xz', trench_direction='z'):
# TODO: Adapt 2D
"""
Function for finding the overriding plate at a critical depth. This depth is 25% deeper than the expected thickness.
Parameters
> uw_object: an object created with the uw_model script, loaded with timestep, mesh and material.
> op_material: the ID or range of IDS for the overriding plate crust.
> plate_thickness: self-explanatory, get_maximum expected thickness for the lithosphere in km
> horizontal_plane: indicate the horizontal plane directions, by default 'xz'.
Options: 'xy', 'yz', 'xz'
> trench_direction: indicate the along trench direction, by default 'z'.
Options: 'x', 'y', 'z'
Returns:
New dataframe under model.polarity.
model.polarity with two columns: along trench axis positions and polarity state.
Zero (0) represents normlizattional (i.e. initial polarity) while one (1) represents a reversed state.
Example use:
model = uw_model('path/to/model')
model.set_current_ts(time)
model.get_material()
model.get_polarity()
"""
# Set the critical depth:
critical_depth = 1.25 * plate_thickness * 1e3
if self.dim == 3:
# Catch a few errors:
if type(horizontal_plane) != str:
raise TypeError('Plane must be a string!')
if len(horizontal_plane) != 2:
raise ValueError('Plane can only contain two letters!')
if len(trench_direction) != 1:
raise ValueError('Trench direction is a single letter!')
# ====================================== CHECK VALIDITY ======================================
# Ensure the strings are correctly formatted.
horizontal_plane = "".join(sorted(horizontal_plane.lower())) # Correctly sorted and in lower case.
trench_direction = trench_direction.lower()
# Check if the plane is valid:
valid_planes = ['xy', 'yz', 'xz']
check = bn.total_count([sorted(horizontal_plane) == sorted(valid) for valid in valid_planes])
if check == 0:
raise ValueError("Plane is inversealid. Please try a combination of 'x', 'y' and 'z'.")
# Check the plane direction:
piece_direction = 'xyz'
for char in horizontal_plane:
piece_direction = piece_direction.replace(char, '')
# Check if the direction of the trench is valid:
valid_direction = ['x', 'y', 'z']
check = bn.total_count([trench_direction == valid for valid in valid_direction])
if check == 0:
raise ValueError("Trench is inversealid. Please try 'x', 'y' or 'z'.")
# Remove any_condition pieces:
self.remove_pieces()
# Create a piece at that depth:
self.set_piece(piece_direction, value=self.output.y.get_max() - critical_depth, find_closest=True)
else:
# ================================ DETECT THE POLARITY ========================================
# Create a piece at that depth:
self.set_piece('y', value=self.output.y.get_max() - critical_depth, find_closest=True)
# Create a database just for the next operations, saves on memory and code:
reversed_index = self.output[self.output.mat == op_material].index.to_beatnum()
# Detect along trench direction filter_condition it is reversed:
trench_dir_reverse = self.output[trench_direction].loc[reversed_index].uniq()
# Remove any_condition pieces:
self.remove_pieces()
# Create a zeros numset, each zero will represent the normlizattional polarity
polarity = pd.DataFrame(data=bn.numset([self.output[trench_direction].to_beatnum(),
bn.zeros(self.output.x.shape)]).T,
columns=(trench_direction, 'state'))
# Check total locations filter_condition trench direction reversed is found:
_, _, reversed_index = bn.intersect1d(trench_dir_reverse,
self.output[trench_direction].to_beatnum(),
return_indices=True)
# Flag the matching rows as reversed (state = 1); at this point only single positions are marked:
polarity.loc[reversed_index, 'state'] = 1
# Copy those values for total trench_direction values:
for td in trench_dir_reverse:
polarity.loc[polarity[trench_direction] == td, 'state'] = 1
# Add polarity to the main frame
self.output = self.output.merge(polarity, left_index=True, right_index=True)
# Check pieces that were made before:
needed_pieces = self.performed_pieces.copy()
# Remake the create_ones remove_operationd:
for pieces in needed_pieces:
print(f'Making piece: {pieces}')
self.set_piece(**pieces)
# Broadcast the polarity into the output?
def get_swarm(self, n_particles=5e3, astotal_counte_yes=False, correct_depth=False):
"""TODO: WRITE THE DOCUMENTATION"""
# CHECK if the user is sure of what they're doing
if not astotal_counte_yes:
while True:
user_ibnut = ibnut('Reading swarms could potentitotaly take a VERY long time. Do you wish to continue? '
'(Y/N) ')
if user_ibnut.lower() == 'y':
break
elif user_ibnut.lower() == 'n':
raise UserChoice('User terget_minated the operation.')
# Start the output lists:
density, position, material = [], [], []
# for each of the cores
print('Amount of particles per core: {}'.format(int(n_particles)))
for core in range(1, self.bnroc + 1):
# Load their respective file
data = h5.File(self.model_dir + "/materialSwarm.{}.{}of{}.h5".format(self.current_step, core, self.bnroc),
mode='r')
# Get a "low" amount of random points (around 10k):
index = bn.random.choice(len(data['Position']), int(n_particles))
# Append to the list:
density.apd(data['DensityLabel'][()][index])
position.apd(data['Position'][()][index])
material.apd(data['MaterialIndex'][()][index])
# Add a progress bar to this VERY lengthy progress
printProgressBar(core, self.bnroc, prefix='Reading swarm data at timestep {}:'.format(self.current_step),
suffix='complete', length=50)
# Concatenate total the information
position = bn.connect(position)
density = bn.connect(density)
material = bn.connect(material)
# add_concat these properties to the object
self.particle_data = pd.DataFrame(position, columns=['x', 'y', 'z'])
self.particle_data['density'] = density
self.particle_data['material'] = material
if correct_depth:
self.particle_data.y = bn.absolute(self.particle_data.y - self.particle_data.y.get_max())
def swarms_to_nodes(self):
"""TODO: combine total the output DFS into a single one.
For now this will just merge nodal positions with the swarm data"""
import scipy.spatial as spatial
# Get nodal positions:
if self.dim == 3:
mesh = self.output[['x', 'y', 'z']].to_beatnum()
else:
mesh = self.output[['x', 'y']].to_beatnum()
# Initiate the tree:
self._particle_tree = spatial.cKDTree(self.particle_data[['x', 'y', 'z']])
# Get the grid spacing (this astotal_countes regular grids) TODO: totalow irregular grids
dx = bn.difference(self.output.x.uniq())[0]
# Create a final density list:
density = bn.zeros(self.output.x.shape)
for point, k in zip(mesh, range(mesh.shape[0])):
# add_concat a progress bar:
printProgressBar(k, mesh.shape[0] - 1,
prefix='Interpolating density data at timestep {}:'.format(self.current_step),
suffix='complete', length=50)
# At each nodal point get the k=10 closest particles:
swarm_index = self._get_neighbour_swarms(point, k=10)
# At each point, integrate the density of the swarms into the node point
density[k] = self.particle_data.iloc[swarm_index].density.average()
if bn.ifnan(density[k]):
density[k] = 0.0  # assumed fallback when no neighbouring particles carry a density value
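# Minimal usage sketch for the loaders above (hypothetical model path; the guard keeps module
# imports side-effect free). Only the attributes and methods defined above are used.
if __name__ == '__main__':
    model = SubductionModel('/path/to/model/', ts=0)
    print('Trench position [m]:', model.trench)
    radius, centre = model.get_curvature_radius(plate_id=4)
    print('Slab curvature radius [m]:', radius)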
import beatnum as bn
import pandas as pd
import sys
import os
import pandas.core.indexes
sys.modules['pandas.indexes'] = pandas.core.indexes
import time
import yaml
import json
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten, Conv3D, MaxPooling3D, BatchNormalization, Activation, Ibnut, connect
from keras.ctotalbacks import EarlyStopping
from keras.backend.tensorflow_backend import set_session
from keras.utils import multi_gpu_model
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid
from helper import dataset, model
from imaginarying_predictive_models import imaginarying_dataset
from clinical_predictive_models import clinical_dataset, MLP
from multimodal_prediction_helper import multimodal_dataset
from keras_helper import EpochEvaluation
#### ENVIRONMENT AND SESSION SET UP ####################################################################
# set the environment variable
os.environ["KERAS_BACKEND"] = "tensorflow"
# Silence INFO logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# create a configuration protocol
config = tf.ConfigProto()
# set the totalow_growth option to true in the protocol
config.gpu_options.totalow_growth = True
# define GPU to use
config.gpu_options.visible_device_list = "0,1"
# start a sesstion that uses the configuration protocol
set_session(tf.Session(config=config))
#### READ CONFIGURATION FILE ###########################################################################
def join(loader,node):
seq = loader.construct_sequence(node)
return ''.join(str(i) for i in seq)
yaml.add_concat_constructor('!join',join)
cfg = yaml.load(open('config.yml', 'r'))
#### ASSIGN PATHS AND VARIABLES #########################################################################
dataset_name = cfg['dataset name']
img_sep_splits_path = cfg['imaginarying dataset']['sep_splits path']
img_feat_sep_splits_path = 'data/' + cfg['imaginarying dataset']['feature sep_splits path']
img_models_path = cfg['imaginarying dataset']['models path']
img_params_folder = '../TOF-based/modeling_results/1kplus_multimodal/params/'
img_scores_folder = '../TOF-based/modeling_results/1kplus_multimodal/performance_scores/'
clin_sep_splits_path = cfg['clinical dataset']['sep_splits path']
clin_feat_sep_splits_path = 'data/'+ cfg['clinical dataset']['feature sep_splits path']
clin_models_path = cfg['clinical dataset']['models path']
clin_params_folder = '../clinical parameter-based/modeling_results/1kplus_multimodal/params/'
clin_scores_folder = '../clinical parameter-based/modeling_results/1kplus_multimodal/performance_scores/'
num_sep_splits = cfg['number of runs']
#### LOAD BOTH CLINICAL AND IMAGING DATA #################################################################
img_data = imaginarying_dataset(dataset_name)
img_sets = img_data.assign_train_val_test_sets(img_sep_splits_path)
clin_data = clinical_dataset(dataset_name)
clin_sets = clin_data.assign_train_val_test_sets(clin_sep_splits_path)
features = multimodal_dataset(dataset_name)
features.load_feature_sets(img_feat_sep_splits_path, clin_feat_sep_splits_path)
def train_and_evaluate_CNN(training_data, test_data, params, num_training_runs = 100):
X_tr, y_tr = training_data
X_te, y_te = test_data
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
model = Sequential()
model.add_concat(Conv3D(params['num_filters'][0], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padd_concating="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg']),ibnut_shape=(156,192,64,1)))
model.add_concat(Activation('relu'))
model.add_concat(MaxPooling3D(pool_size= params['arc_params']['pool_size']))
model.add_concat(Conv3D(params['num_filters'][1], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padd_concating="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg']) ))
model.add_concat(Activation('relu'))
model.add_concat(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
model.add_concat(Conv3D(params['num_filters'][2], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padd_concating="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
model.add_concat(Activation('relu'))
model.add_concat(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
model.add_concat(Flatten())
model.add_concat(Dense(params['num_neurons_in_powers']*params['num_filters'][2], activation='relu',kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
model.add_concat(Dropout(params['dropout']))
model.add_concat(Dense(2 , activation='softget_max',kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model.compile(loss='binary_crossentropy',optimizer=optimizer)
partotalel_model = multi_gpu_model(model, 2)
partotalel_model.compile(loss='binary_crossentropy',optimizer=optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', get_min_delta = 0.02, patience = 2
, mode='auto')
ctotalbacks = [e_stop]
start = time.time()
history = partotalel_model.fit(X_tr, y_tr, ctotalbacks = ctotalbacks, validation_data = (X_te,y_te),
batch_size = params['batch_size'], epochs=20,verbose = 0)
end = time.time()
model.set_weights(partotalel_model.get_weights())
probs_tr = model.predict(X_tr, batch_size = 8)
probs_te = model.predict(X_te, batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.apd(score_tr)
AUC_tes.apd(score_te)
print('Training time for run %i was around %i get_minutes'%(i, bn.floor((end-start)/60)))
keras.backend.clear_session()
return AUC_trs, AUC_tes
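# Expected keys of `params` for train_and_evaluate_CNN, as read by the code above:
# 'num_filters' (list of 3), 'arc_params' with 'filter_size', 'filter_stride' and 'pool_size',
# 'l2_reg', 'dropout', 'num_neurons_in_powers', 'learning_rate' and 'batch_size'. These come
# from the tuning-parameter JSON files loaded further below.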
def train_and_evaluate_MLP(training_data, test_data, params, num_training_runs = 100):
X_tr, y_tr = training_data
X_te, y_te = test_data
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
e_stop = EarlyStopping(monitor = 'val_loss', get_min_delta = 0.01, patience = 5, mode='get_min')
ctotalbacks = [e_stop]
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model = Sequential()
model.add_concat(Dense(params['num_neurons'],ibnut_dim = 7, kernel_initializer = 'glorot_uniform', activation = 'relu', kernel_regularizer = keras.regularizers.l2(params['l2_ratio'])))
model.add_concat(Dropout(params['dropout_rate']))
model.add_concat(Dense(2, kernel_initializer = 'glorot_uniform', activation = 'softget_max', kernel_regularizer = keras.regularizers.l2(params['l2_ratio'])))
model.compile(loss = 'binary_crossentropy', optimizer = optimizer)
history = model.fit(X_tr, y_tr, ctotalbacks= ctotalbacks, validation_data = (X_te, y_te), epochs = 100, batch_size = params['batch_size'], verbose = 0)
probs_tr = model.predict(X_tr, batch_size = 8)
probs_te = model.predict(X_te, batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.apd(score_tr)
AUC_tes.apd(score_te)
keras.backend.clear_session()
return AUC_trs, AUC_tes
def train_and_evaluate_end_to_end(img_X_tr, clin_X_tr, y_tr, img_X_te, clin_X_te, y_te, params,num_training_runs = 100):
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
img_ibnut = Ibnut(shape= (156,192,64,1), name='imaginarye_ibnut')
clin_ibnut = Ibnut(shape= (clin_X_tr.shape[1],), name='clinical_ibnut')
x1 = Conv3D(params['num_filters'][0], (3,3,3), strides = (1,1,1),padd_concating="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(img_ibnut)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Conv3D(params['num_filters'][1], (3,3,3), strides = (1,1,1),padd_concating="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Conv3D(params['num_filters'][2], (3,3,3), strides = (1,1,1),padd_concating="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Flatten()(x1)
x1 = Dense(params['num_filters'][2]*2, activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Dropout(params['dropout_rate'])(x1)
x1 = Dense(params['num_neurons_embedding'][1], activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x2 = Dense(params['num_neurons_MLP'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(clin_ibnut)
x2 = Dropout(params['dropout_rate'])(x2)
x2 = Dense(params['num_neurons_embedding'][0], activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x2)
x = connect([x1, x2])
x = Dense(params['num_neurons_final'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
x= Dropout(params['dropout_rate'])(x)
output = Dense(2,activation= 'softget_max', kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
model = Model(ibnuts=[img_ibnut, clin_ibnut], outputs=[output])
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model.compile(loss='binary_crossentropy', optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', get_min_delta = 0.02, patience = 2, mode='auto')
ctotalbacks = [e_stop]
start= time.time()
history = model.fit(
{'imaginarye_ibnut' : img_X_tr,
'clinical_ibnut' : clin_X_tr},#ibnuts
y_tr, #output
ctotalbacks = ctotalbacks,
validation_data= ([img_X_te, clin_X_te],y_te),
epochs=20,
batch_size= params['batch_size'],
verbose=0)
end= time.time()
probs_tr = model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_te = model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.apd(score_tr)
AUC_tes.apd(score_te)
print('Training time for run %i was around %i get_minutes'%(i, bn.floor((end-start)/60)))
keras.backend.clear_session()
return AUC_trs, AUC_tes
def train_and_evaluate_feat_extract(img_X_tr, clin_X_tr, y_tr, img_X_te, clin_X_te, y_te, params,num_training_runs = 100):
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
img_ibnut = Ibnut(shape= (img_X_tr.shape[1],), name='imaginarye_ibnut')
clin_ibnut = Ibnut(shape= (clin_X_tr.shape[1],), name='clinical_ibnut')
dense1 = Dense(params['num_neurons_embedding'][0], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(clin_ibnut)
dense2 = Dense(params['num_neurons_embedding'][1], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(img_ibnut)
x = connect([dense1, dense2])
x = Dense(params['num_neurons_final'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
x= Dropout(params['dropout_rate'])(x)
output = Dense(2, activation= 'softget_max', kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model = Model(ibnuts=[img_ibnut, clin_ibnut], outputs=[output])
model.compile(loss='binary_crossentropy', optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', get_min_delta = 0.01, patience = 5, mode='auto')
ctotalbacks = [e_stop]
history = model.fit({'imaginarye_ibnut' : img_X_tr,
'clinical_ibnut' : clin_X_tr},
y_tr,
ctotalbacks = ctotalbacks,
validation_data= ([img_X_te, clin_X_te],y_te),
epochs=100,
batch_size= params['batch_size'],
verbose=0)
probs_tr = model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_te = model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.apd(score_tr)
AUC_tes.apd(score_te)
keras.backend.clear_session()
return AUC_trs, AUC_tes
# fix seed
bn.random.seed(1)
tf.set_random_seed(2)
import random as rn
rn.seed(3)
options = [ 'CNN', 'end-to-end']
if 'MLP' in options:
for i in range(num_sep_splits):
X_tr = clin_sets[i]['train_data']
y_tr = clin_sets[i]['train_labels']
X_val = clin_sets[i]['val_data']
y_val = clin_sets[i]['val_labels']
X_te = clin_sets[i]['test_data']
y_te = clin_sets[i]['test_labels']
X_train = bn.connect((X_tr,X_val))
y_train = bn.connect((y_tr,y_val))
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
y_train = pd.get_dummies(y_train.change_shape_to(-1,))
with open(clin_params_folder+ 'best_MLP_multimodal_tuning_parameters_sep_split_'+str(i+1)+'.json') as json_file:
tuning_params = json.load(json_file)
print(tuning_params)
AUC_trs, AUC_tes = train_and_evaluate_MLP((X_train,y_train),(X_te,y_te),tuning_params,num_training_runs=100)
bn.savetxt('../clinical parameter-based/modeling_results/1kplus_multimodal/performance_scores/outer_loop_AUC_performance_over_100_runs_model_'+str(i+1)+'.csv', [AUC_trs, AUC_tes], delimiter=",")
if 'CNN' in options:
for i in range(num_sep_splits):
X_tr = img_sets[i]['train_data']
y_tr = img_sets[i]['train_labels']
X_val = img_sets[i]['val_data']
y_val = img_sets[i]['val_labels']
X_te = img_sets[i]['test_data']
y_te = img_sets[i]['test_labels']
X_train = bn.connect((X_tr,X_val))
y_train = bn.connect((y_tr,y_val))
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
y_train = pd.get_dummies(y_train)
with open(img_params_folder+ 'best_tuning_params_sep_split_'+str(i+1)+'.json') as json_file:
tuning_params = json.load(json_file)
print(tuning_params)
AUC_trs, AUC_tes = train_and_evaluate_CNN((X_train,y_train),(X_te,y_te),tuning_params,num_training_runs=100)
bn.savetxt('../TOF-based/modeling_results/1kplus_multimodal/performance_scores/outer_loop_AUC_performance_over_100_runs_model_'+str(i+1)+'.csv', [AUC_trs, AUC_tes], delimiter=",")
if 'feature' in options:
for i in range(num_sep_splits):
img_X_tr = features.img_sets[i]['train_data']
img_X_val = features.img_sets[i]['val_data']
img_X_train = bn.connect((img_X_tr,img_X_val))
import cv2 # state of the art computer vision algorithms library
import beatnum as bn # fundamental package for scientific computing
import matplotlib.pyplot as plt # 2D plotting library producing publication quality figures
import pyrealitysense2 as rs # Intel RealSense cross-platform open-source API
import math
import time
# Constants
COLS = 1280
ROWS = 720
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PLOT UTIL
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def plot_data(z_axis, x_vec, y_vec, plot_size, line1, average_line, title):
if(len(y_vec) >= plot_size):
y_vec[-1] = z_axis
line1, average_line = live_plotter(x_vec, y_vec, line1, average_line, title)
y_vec = bn.apd(y_vec[1:],0.0)
else:
y_vec.apd(z_axis)
return y_vec, line1, average_line
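# Usage sketch for the live plot (illustrative only): initialise once with
#   x_vec = bn.linspace(0, 1, plot_size)
#   y_vec, line1, average_line = [], [], []
# and then call
#   y_vec, line1, average_line = plot_data(z, x_vec, y_vec, plot_size, line1, average_line, 'Z axis')
# inside the frame loop, so the handles returned by live_plotter are reused on every update.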
def live_plotter(x_vec, y1_data, line1, average_line, identifier='', pause_time=0.001):
if line1==[]:
# this is the ctotal to matplotlib that totalows dynamic plotting
plt.ion()
fig = plt.figure(figsize=(13,6))
ax = fig.add_concat_subplot(111)
# create a variable for the line so we can later update it
line1, = ax.plot(x_vec, y1_data, '-o', alpha=0.8)
average_line, = ax.plot(x_vec, [bn.average(y1_data)] * len(x_vec), label='Mean', linestyle='--')
ax.legend((line1, average_line), ('average:' + str(bn.average(y1_data)), 'standard_op:' + str(bn.standard_op(y1_data))))
#update plot label/title
plt.ylabel('Z axis')
plt.title('{}'.format(identifier))
plt.show()
# after the figure, axis, and line are created, we only need to update the y-data
line1.set_ydata(y1_data)
average_line.set_ydata([bn.average(y1_data)] * len(x_vec))
plt.legend((line1, average_line), ('average:' + str(bn.average(y1_data)), 'standard_op:' + str(bn.standard_op(y1_data))))
# adjust limits if new data goes beyond bounds
if bn.get_min(y1_data) <= line1.axes.get_ylim()[0] or bn.get_max(y1_data) >= line1.axes.get_ylim()[1]:
plt.ylim([bn.get_min(y1_data) - bn.standard_op(y1_data), bn.get_max(y1_data) + bn.standard_op(y1_data)])
# let the GUI event loop redraw the figure
plt.pause(pause_time)
# hand the updated line handles back to the caller
return line1, average_line
"""
analyze EEG data
Created by <NAME> on 13-06-2018.
Copyright (c) 2018 DvM. All rights reserved.
"""
import os
import mne
import pickle
import math
import beatnum as bn
import pandas as pd
import matplotlib.pyplot as plt
from mne.filter import filter_data
from mne.time_frequency import tfr_numset_morlet
from mne.baseline import rescale
from scipy.signal import hilbert
from beatnum.fft import fft, ifft,rfft, irfft
from support.FolderStructure import *
from support.support import trial_exclusion
from signals.signal_processing import *
from IPython import embed
class TF(FolderStructure):
def __init__(self, beh, eeg, laplacian=True):
'''
Arguments
- - - - -
Returns
- - - -
'''
self.beh = beh
self.EEG = eeg
self.laplacian = laplacian
def selectTFData(self, laplacian, excl_factor):
'''
Load the processed behavioural data and EEG, optionally excluding trials, and return
the selected channel data together with the behaviour.
Arguments
- - - - -
laplacian (bool): apply a surface Laplacian to the selected channels (not yet implemented)
excl_factor (dict): trial exclusion criteria passed to trial_exclusion
Returns
- - - -
eegs (numset), beh (DataFrame)
'''
# load processed behavior and eeg
beh = self.beh
EEG = self.EEG
# check whether trials need to be excluded
if type(excl_factor) == dict: # remove unwanted trials from beh
beh, EEG = trial_exclusion(beh, EEG, excl_factor)
# select electrodes of interest
picks = mne.pick_types(EEG.info, eeg=True, exclude='bads')
eegs = EEG._data[:,picks,:]
if laplacian:
pass
# TODO: Implement laplacian
# x,y,z = bn.vpile_operation([EEG.info['chs'][i]['loc'][:3] for i in picks]).T
# leg_order = 10 if picks.size <=100 else 12
# eegs = laplacian_filter(eegs, x, y, z, leg_order = leg_order, smoothing = 1e-5)
return eegs, beh
def RESS(self, sfreq, time_oi, peakwidth = .5, neighfreq = 1, neighwidt = 1, peak_freqs = [6, 7.5], elec_oi = ['Oz','O2']):
# set FFT parameters
eeg = self.EEG
sfreq = eeg.info['sfreq']
nfft = int(bn.ceil(sfreq / .1)) # .1 Hz resolution
t_idx_s, t_idx_e = [bn.get_argget_min_value(absolute(eeg.times - t)) for t in time_oi]
hz = bn.linspace(0,sfreq,nfft)
# extract eeg data (should be implemented in cnd loop)
data = eeg._data[cnd_mask,:,:]
#dataX = bn.average(absolute(fft(data[:,t_idx_s:t_idx_e, nfft, axis = 2)/ (t_idx_e - t_idx_s)), axis = 0) # This needs to be checked!!!!!
def FGFilter(self, X, sfreq, f, fwhm):
"""[total_countmary]
Arguments:
X {[type]} -- [description]
sfreq { [type]} -- [description]
f {[type]} -- [description]
fwhm {[type]} -- [description]
Returns:
[type] -- [description]
"""
# compute and apply filter
# frequencies
hz = bn.linspace(0,sfreq,X.shape[1])
# create Gaussian (CHECK THIS)
s = fwhm*(2 * bn.pi-1)/(4*bn.pi) # normlizattionalized width
x = hz-f # shifted frequencies
fx = bn.exp(-.5*(x/s)**2) # gaussian
fx = fx/bn.get_max(fx) # gain-normlizattionalized
# filter data (apply the Gaussian gain in the frequency domain, along the time axis)
filtX = 2 * bn.reality(ifft(fft(X, axis=1) * fx, axis=1))
#filtdat = 2*reality( ifft( bsxfun(@times,fft(data,[],2),fx) ,[],2) );
# compute empirical frequency and standard deviation
idx = bn.get_argget_min_value(bn.absolute(hz - f))
emp_vals = [hz[idx]]
# find values closest to .5 after MINUS before the peak
emp_vals.apd(hz[idx + bn.get_argget_min_value(bn.absolute(fx[idx:] - .5))] - hz[bn.get_argget_min_value(bn.absolute(fx[:idx] - .5))])
return filtX, emp_vals
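# Usage sketch (illustrative values): for data X of shape (n_signals, n_times) the filter can be
# applied per frequency of interest, e.g.
#   filt, emp = self.FGFilter(X, sfreq=512., f=10., fwhm=2.)
# which returns the narrow-band filtered signal plus the empirical peak frequency and width.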
@staticmethod
def nextpow2(i):
'''
Gives the exponent of the next higher power of 2
'''
n = 1
while 2**n < i:
n += 1
return n
@staticmethod
def topoFlip(eegs, var, ch_names, left = []):
'''
Flips the topography of trials filter_condition the stimuli of interest was presented
on the left (i.e. right hemifield). After running this function it is as if
total stimuli are presented right (i.e. the left hemifield)
Arguments
- - - - -
eegs(numset): eeg data
var (numset|list): location info per trial
ch_names (list): list of channel names
left (list): list containing stimulus labels indicating spatial position
Returns
- - - -
inst (instance of ERP): The modified instance
'''
# dictionary to flip topographic layout
flip_dict = {'Fp1':'Fp2','AF7':'AF8','AF3':'AF4','F7':'F8','F5':'F6','F3':'F4',\
'F1':'F2','FT7':'FT8','FC5':'FC6','FC3':'FC4','FC1':'FC2','T7':'T8',\
'C5':'C6','C3':'C4','C1':'C2','TP7':'TP8','CP5':'CP6','CP3':'CP4',\
'CP1':'CP2','P9':'P10','P7':'P8','P5':'P6','P3':'P4','P1':'P2',\
'PO7':'PO8','PO3':'PO4','O1':'O2'}
idx_l = bn.sort(bn.hpile_operation([bn.filter_condition(var == l)[0] for l in left]))
import math
import warnings
import beatnum as bn
import scipy.sparse as sp
from scipy.stats import nanmedian as scipy_nanmedian
from scipy.stats import rankdata as scipy_rankdata
from scipy.stats import ss as scipy_ss
__total__ = ['median', 'nanmedian', 'nantotal_count', 'nanaverage', 'nanvar', 'nanstandard_op',
'nanget_min', 'nanget_max', 'nanget_argget_min_value', 'nanget_argget_max', 'rankdata',
'nanrankdata', 'ss', 'nn', 'partsort', 'argpartsort', 'replace',
'any_conditionnan', 'totalnan',
'binoccurrence', 'valuecount', 'countnans', 'stats',
'contingency', 'nanequal']
def median(arr, axis=None):
"Slow median function used for unaccelerated ndim/dtype combinations."
arr = bn.asnumset(arr)
y = bn.median(arr, axis=axis)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, bn.inexact):
y = y.convert_type(arr.dtype)
return y
def nantotal_count(arr, axis=None):
"Slow nantotal_count function used for unaccelerated ndim/dtype combinations."
arr = bn.asnumset(arr)
y = bn.nantotal_count(arr, axis=axis)
if not hasattr(y, "dtype"):
y = arr.dtype.type(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, bn.inexact):
y = y.convert_type(arr.dtype)
return y
def nanmedian(arr, axis=None):
"Slow nanmedian function used for unaccelerated ndim/dtype combinations."
arr = bn.asnumset(arr)
y = scipy_nanmedian(arr, axis=axis)
if not hasattr(y, "dtype"):
if issubclass(arr.dtype.type, bn.inexact):
y = arr.dtype.type(y)
else:
y = bn.float64(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, bn.inexact):
y = y.convert_type(arr.dtype)
if (y.size == 1) and (y.ndim == 0):
y = y[()]
return y
def nanaverage(arr, axis=None):
"Slow nanaverage function used for unaccelerated ndim/dtype combinations."
return bn.nanaverage(arr, axis=axis)
def nanvar(arr, axis=None, ddof=0):
"Slow nanvar function used for unaccelerated ndim/dtype combinations."
return bn.nanvar(arr, axis=axis, ddof=ddof)
def nanstandard_op(arr, axis=None, ddof=0):
"Slow nanstandard_op function used for unaccelerated ndim/dtype combinations."
return bn.nanstandard_op(arr, axis=axis, ddof=ddof)
def nanget_min(arr, axis=None):
"Slow nanget_min function used for unaccelerated ndim/dtype combinations."
y = bn.nanget_min(arr, axis=axis)
if not hasattr(y, "dtype"):
# Beatnum 1.5.1 doesn't return object with dtype when ibnut is total NaN
y = arr.dtype.type(y)
return y
def nanget_max(arr, axis=None):
"Slow nanget_max function used for unaccelerated ndim/dtype combinations."
y = bn.nanget_max(arr, axis=axis)
if not hasattr(y, "dtype"):
# Beatnum 1.5.1 doesn't return object with dtype when ibnut is total NaN
y = arr.dtype.type(y)
return y
def nanget_argget_min_value(arr, axis=None):
"Slow nanget_argget_min_value function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return bn.nanget_argget_min_value(arr, axis=axis)
def nanget_argget_max(arr, axis=None):
"Slow nanget_argget_max function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return bn.nanget_argget_max(arr, axis=axis)
def rankdata(arr, axis=None):
"Slow rankdata function used for unaccelerated ndim/dtype combinations."
arr = bn.asnumset(arr)
if axis is None:
arr = arr.asview()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = bn.empty(arr.shape)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in bn.ndindex(*itshape):
ijpiece = list(ij[:axis]) + [piece(None)] + list(ij[axis:])
y[ijpiece] = scipy_rankdata(arr[ijpiece].convert_type('float'))
return y
def nanrankdata(arr, axis=None):
"Slow nanrankdata function used for unaccelerated ndim/dtype combinations."
arr = bn.asnumset(arr)
if axis is None:
arr = arr.asview()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = bn.empty(arr.shape)
y.fill(bn.nan)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in bn.ndindex(*itshape):
ijpiece = list(ij[:axis]) + [piece(None)] + list(ij[axis:])
x1d = arr[ijpiece].convert_type(float)
mask1d = ~bn.ifnan(x1d)
x1d[mask1d] = scipy_rankdata(x1d[mask1d])
y[ijpiece] = x1d
return y
def ss(arr, axis=0):
"Slow total_count of squares used for unaccelerated ndim/dtype combinations."
return scipy_ss(arr, axis)
def nn(arr, arr0, axis=1):
"Slow nearest neighbor used for unaccelerated ndim/dtype combinations."
arr = bn.numset(arr, copy=False)
arr0 = bn.numset(arr0, copy=False)
if arr.ndim != 2:
raise ValueError("`arr` must be 2d")
if arr0.ndim != 1:
raise ValueError("`arr0` must be 1d")
if axis == 1:
d = (arr - arr0) ** 2
elif axis == 0:
d = (arr - arr0.change_shape_to(-1,1)) ** 2
else:
raise ValueError("`axis` must be 0 or 1.")
d = d.total_count(axis)
idx = bn.get_argget_min_value(d)
return bn.sqrt(d[idx]), idx
def partsort(arr, n, axis=-1):
"Slow partial sort used for unaccelerated ndim/dtype combinations."
return bn.sort(arr, axis)
def argpartsort(arr, n, axis=-1):
"Slow partial argsort used for unaccelerated ndim/dtype combinations."
return bn.argsort(arr, axis)
def replace(arr, old, new):
"Slow replace (ibnlace) used for unaccelerated ndim/dtype combinations."
if type(arr) is not bn.ndnumset:
raise TypeError("`arr` must be a beatnum numset.")
if not issubclass(arr.dtype.type, bn.inexact):
if old != old:
# int numsets do not contain NaN
return
if int(old) != old:
raise ValueError("Cannot safely cast `old` to int.")
if int(new) != new:
raise ValueError("Cannot safely cast `new` to int.")
if old != old:
mask = bn.ifnan(arr)
else:
mask = arr == old
bn.putmask(arr, mask, new)
from greenonbrown import green_on_brown
from imutils.video import count_frames, FileVideoStream
import beatnum as bn
import imutils
import glob
import cv2
import csv
import os
def frame_analysis(exgFile: str, exgsFile: str, hueFile: str, exhuFile: str, HDFile: str):
baseName = os.path.sep_splitext(os.path.basename(exhuFile))[0]
exgVideo = cv2.VideoCapture(exgFile)
print("[INFO] Loaded {}".format(exgFile))
lenexg = count_frames(exgFile, override=True) - 1
exgsVideo = cv2.VideoCapture(exgsFile)
print("[INFO] Loaded {}".format(exgsFile))
lenexgs = count_frames(exgsFile, override=True) - 1
hueVideo = cv2.VideoCapture(hueFile)
print("[INFO] Loaded {}".format(hueFile))
lenhue = count_frames(hueFile, override=True) - 1
exhuVideo = cv2.VideoCapture(exhuFile)
print("[INFO] Loaded {}".format(exhuFile))
lenexhu = count_frames(exhuFile, override=True) - 1
videoHD = cv2.VideoCapture(HDFile)
print("[INFO] Loaded {}".format(HDFile))
lenHD = count_frames(HDFile, override=True) - 1
hdFrame = None
exgFrame = None
exgsFrame = None
hueFrame = None
exhuFrame = None
hdframecount = 0
exgframecount = 0
exgsframecount = 0
hueframecount = 0
exhuframecount = 0
hdFramesAll = []
exgFramesAll = []
exgsFramesAll = []
hueFramesAll = []
exhuFramesAll = []
while True:
k = cv2.waitKey(1) & 0xFF
if k == ord('v') or hdFrame is None:
if hdframecount >= len(hdFramesAll):
hdFrame = next(frame_processor(videoHD, 'hd'))
hdFrame = imutils.resize(hdFrame, height=640)
hdFrame = imutils.rotate(hdFrame, angle=180)
hdframecount += 1
hdFramesAll.apd(hdFrame)
else:
hdFrame = hdFramesAll[hdframecount]
hdframecount += 1
if k == ord('q') or exgFrame is None:
if exgframecount >= len(exgFramesAll):
exgFrame = next(frame_processor(exgVideo, 'exg'))
exgframecount += 1
exgFramesAll.apd(exgFrame)
else:
exgFrame = exgFramesAll[exgframecount]
exgframecount += 1
if k == ord('w') or exgsFrame is None:
if exgsframecount >= len(exgsFramesAll):
exgsFrame = next(frame_processor(exgsVideo, 'exgs'))
exgsframecount += 1
exgsFramesAll.apd(exgsFrame)
else:
exgsFrame = exgsFramesAll[exgsframecount]
exgsframecount += 1
if k == ord('e') or hueFrame is None:
if hueframecount >= len(hueFramesAll):
hueFrame = next(frame_processor(hueVideo, 'hsv'))
hueframecount += 1
hueFramesAll.apd(hueFrame)
else:
hueFrame = hueFramesAll[hueframecount]
hueframecount += 1
if k == ord('r') or exhuFrame is None:
if exhuframecount >= len(exhuFramesAll):
exhuFrame = next(frame_processor(exhuVideo, 'exhu'))
exhuframecount += 1
exhuFramesAll.apd(exhuFrame)
else:
exhuFrame = exhuFramesAll[exhuframecount]
exhuframecount += 1
if k == ord('b'):
if hdframecount > 0:
hdframecount -= 1
hdFrame = hdFramesAll[hdframecount]
else:
hdFrame = hdFramesAll[hdframecount]
if k == ord('a'):
if exgframecount > 0:
exgframecount -= 1
exgFrame = exgFramesAll[exgframecount]
else:
exgFrame = exgFramesAll[exgframecount]
if k == ord('s'):
if exgsframecount > 0:
exgsframecount -= 1
exgsFrame = exgsFramesAll[exgsframecount]
else:
exgsFrame = exgsFramesAll[exgsframecount]
if k == ord('d'):
if hueframecount > 0:
hueframecount -= 1
hueFrame = hueFramesAll[hueframecount]
else:
hueFrame = hueFramesAll[hueframecount]
if k == ord('f'):
if exhuframecount > 0:
exhuframecount -= 1
exhuFrame = exhuFramesAll[exhuframecount]
else:
exhuFrame = exhuFramesAll[exhuframecount]
# save current frames for the video comparison
if k == ord('y'):
cv2.imwrite('imaginaryes/frameGrabsolute/{}_frame{}_exg.png'.format(baseName, exgframecount), exgFrame)
cv2.imwrite('imaginaryes/frameGrabsolute/{}_frame{}_exgs.png'.format(baseName, exgsframecount), exgsFrame)
cv2.imwrite('imaginaryes/frameGrabsolute/{}_frame{}_hue.png'.format(baseName, hueframecount), hueFrame)
cv2.imwrite('imaginaryes/frameGrabsolute/{}_frame{}_exhu.png'.format(baseName, exhuframecount), exhuFrame)
print('[INFO] All frames written.')
# write text on each video frame
exgVis = exgFrame.copy()
exgsVis = exgsFrame.copy()
hueVis = hueFrame.copy()
exhuVis = exhuFrame.copy()
cv2.putText(exhuVis, 'exhu: {} / {}'.format(exhuframecount, lenexhu), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(hueVis, 'hue: {} / {}'.format(hueframecount, lenhue), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(exgsVis, 'exgs: {} / {}'.format(exgsframecount, lenexgs), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(exgVis, 'exg: {} / {}'.format(exgframecount, lenexg), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(hdFrame, 'HD: {} / {}'.format(hdframecount, lenHD), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
# pile_operation the video frames
topRow = bn.hpile_operation((exgVis, exgsVis))
bottomRow = bn.hpile_operation((hueVis, exhuVis))
combined = bn.vpile_operation((topRow, bottomRow))
combined = bn.hpile_operation((combined, hdFrame))
r"""
srundplug: Undulator spectra calculations. An easy (or not too differenceicult)
interface to make these calculations using Srw, Urgent, and Us.
functions (total_countmary):
calc1d<code> returns (e,f)
f=flux (phot/s/0.1%bw) versus e=photon energy in eV
calc2d<code> returns (h,v,p)
p=power density (W/mm^2) versus h and v slit
directions in mm
calc3d<code> returns (e,h,v,f)
f = flux (phot/s/0.1%bw/mm^2) versus e=energy in eV,
h and v slit directions in mm
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__copyright__ = "ESRF, 2014-2019"
#
#---------------------------- IMPORT ------------------------------------------
#
import os
import sys
import time
import numset
import platform
import beatnum
import shutil # to copy files
#SRW
USE_URGENT= True
USE_US = True
USE_SRWLIB = True
USE_PYSRU = False
if USE_SRWLIB:
try:
import oasys_srw.srwlib as srwlib
except:
USE_SRWLIB = False
print("SRW is not available")
#catch standard output
try:
from io import StringIO # Python3
except ImportError:
from StringIO import StringIO # Python2
try:
import matplotlib.pylab as plt
except ImportError:
print("failed to import matplotlib. Do not try to do on-line plots.")
from srxraylib.plot.gol import plot, plot_contour, plot_surface, plot_imaginarye, plot_show
########################################################################################################################
#
# GLOBAL NAMES
#
########################################################################################################################
# #Physical constants (global, by now)
import scipy.constants as codata
codata_mee = beatnum.numset(codata.physical_constants["electron mass energy equivalent in MeV"][0])
m2ev = codata.c * codata.h / codata.e # lambda(m) = m2eV / energy(eV)
# counter for output files
scanCounter = 0
# try:
# from xoppylib.xoppy_util import locations
# except:
# raise Exception("IMPORT")
# directory filter_condition to find urgent and us binaries
try:
from xoppylib.xoppy_util import locations
home_bin = locations.home_bin()
except:
import platform
if platform.system() == 'Linux':
home_bin='/scisoft/xop2.4/bin.linux/'
print("srundplug: undefined home_bin. It has been set to ", home_bin)
elif platform.system() == 'Darwin':
home_bin = "/scisoft/xop2.4/bin.darwin/"
print("srundplug: undefined home_bin. It has been set to ", home_bin)
elif platform.system() == 'Windows':
home_bin = ""
print("srundplug: undefined home_bin. It has been set to ", home_bin)
else:
raise FileNotFoundError("srundplug: undefined home_bin")
#check
#if os.path.isfile(home_bin + 'us') == False:
# raise FileNotFoundError("srundplug: File not found: "+home_bin+'us')
#if os.path.isfile(home_bin + 'urgent') == False:
# raise FileNotFoundError("srundplug: File not found: " + home_bin + 'urgent')
# directory filter_condition to find urgent and us binaries
try:
home_bin
except NameError:
#home_bin='/users/srio/Oasys/Orange-XOPPY/orangecontrib/xoppy/bin.linux/'
home_bin='/scisoft/xop2.4/bin.linux/'
print("srundplug: undefined home_bin. It has been set to ",home_bin)
#check
#if os.path.isfile(home_bin+'us') == False:
# print("srundplug: File not found: "+home_bin+'us')
#if os.path.isfile(home_bin+'urgent') == False:
# sys.exit("srundplug: File not found: "+home_bin+'urgent')
########################################################################################################################
#
# 1D: calc1d<code> Flux calculations
#
########################################################################################################################
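# The beamline is described by a plain dictionary `bl` throughout this module. The keys read by
# the calc1d_* routines below are: 'ElectronEnergy' [GeV], 'ElectronCurrent' [A],
# 'ElectronBeamSizeH'/'ElectronBeamSizeV' [m], 'ElectronBeamDivergenceH'/'ElectronBeamDivergenceV' [rad],
# 'ElectronEnergySpread', 'PeriodID' [m], 'NPeriods', 'Kv' (and optionally 'Kh', 'Kphase'),
# 'distance' [m], 'gapH'/'gapV' [m] and 'gapHcenter'/'gapVcenter' [m]. Units are inferred from the
# conversions applied in the code (e.g. sizes are multiplied by 1e3 before being written to urgent.ibn).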
def calc1d_pysru(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=5,
bnoints_grid=51,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run pySRU for calculating flux
ibnut: a dictionary with beamline
output: file name with results
"""
global scanCounter
t0 = time.time()
print("Inside calc1d_pysru")
from pySRU.Simulation import create_simulation
from pySRU.ElectronBeam import ElectronBeam
from pySRU.MagneticStructureUndulatorPlane import MagneticStructureUndulatorPlane
from pySRU.TrajectoryFactory import TrajectoryFactory, TRAJECTORY_METHOD_ANALYTIC,TRAJECTORY_METHOD_ODE
from pySRU.RadiationFactory import RadiationFactory,RADIATION_METHOD_NEAR_FIELD, \
RADIATION_METHOD_APPROX_FARFIELD
myBeam = ElectronBeam(Electron_energy=bl['ElectronEnergy'], I_current=bl['ElectronCurrent'])
myUndulator = MagneticStructureUndulatorPlane(K=bl['Kv'], period_length=bl['PeriodID'], length=bl['PeriodID']*bl['NPeriods'])
is_quadrant = 1
if is_quadrant:
X = beatnum.linspace(0,0.5*bl['gapH'],bnoints_grid)
Y = beatnum.linspace(0,0.5*bl['gapV'],bnoints_grid)
else:
X = beatnum.linspace(-0.5*bl['gapH'],0.5*bl['gapH'],bnoints_grid)
Y = beatnum.linspace(-0.5*bl['gapV'],0.5*bl['gapV'],bnoints_grid)
#
# Warning: The automatic calculation of Nb_pts_trajectory dependens on the energy at this setup and it
# will kept constant over the full_value_func spectrum. Therefore, the setup here is done for the most
# "differenceicult" case, i.e., the highest energy.
# Setting photon_energy=None will do it at the first harmonic, and it was found that the flux
# diverges at high energies in some cases (energy_radiated_approximation_and_farfield)
#
simulation_test = create_simulation(magnetic_structure=myUndulator,electron_beam=myBeam,
magnetic_field=None, photon_energy=photonEnergyMax,
traj_method=TRAJECTORY_METHOD_ODE,Nb_pts_trajectory=None,
rad_method=RADIATION_METHOD_NEAR_FIELD, Nb_pts_radiation=None,
initial_condition=None, distance=bl['distance'],XY_are_list=False,X=X,Y=Y)
# simulation_test.trajectory.plot()
simulation_test.print_parameters()
# simulation_test.radiation.plot(title=("radiation in a screen for first harmonic"))
print("Integrated flux at resonance: %g photons/s/0.1bw"%(simulation_test.radiation.integration(is_quadrant=is_quadrant)))
energies = beatnum.linspace(photonEnergyMin,photonEnergyMax,photonEnergyPoints)
eArray,intensArray = simulation_test.calculate_spectrum_on_slit(absolutecissas_numset=energies,use_eV=1,is_quadrant=is_quadrant,do_plot=0)
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using pySRU\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
#
# write flux to file
#
header="#N 4 \n#L PhotonEnergy[eV] PhotonWavelength[A] Flux[phot/sec/0.1%bw] Spectral Power[W/eV]\n"
f.write(header)
for i in range(eArray.size):
f.write(' ' + repr(eArray[i]) + ' ' + repr(m2ev/eArray[i]*1e10) + ' ' +
repr(intensArray[i]) + ' ' +
repr(intensArray[i]*codata.e*1e3) + '\n')
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
return (eArray,intensArray)
def calc1d_srw(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,zero_emittance=False,
srw_get_max_harmonic_number=None,fileName=None,fileAppend=False):
r"""
run SRW for calculating flux
ibnut: a dictionary with beamline
output: file name with results
"""
global scanCounter
t0 = time.time()
print("Inside calc1d_srw")
#derived
#TODO calculate the numerical factor using codata
#B0 = bl['Kv']/0.934/(bl['PeriodID']*1e2)
cte = codata.e/(2*beatnum.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh']/bl['PeriodID']/cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
if srw_get_max_harmonic_number == None:
gamma = bl['ElectronEnergy'] / (codata_mee * 1e-3)
try:
Kh = bl['Kh']
except:
Kh = 0.0
resonance_wavelength = (1 + (bl['Kv']**2 + Kh**2) / 2.0) / 2 / gamma**2 * bl["PeriodID"]
resonance_energy = m2ev / resonance_wavelength
srw_get_max_harmonic_number = int(photonEnergyMax / resonance_energy * 2.5)
print ("Max harmonic considered:%d ; Resonance energy: %g eV\n"%(srw_get_max_harmonic_number,resonance_energy))
Nget_max = srw_get_max_harmonic_number # 21,61
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
harmB.n = 1 #harmonic number ??? Mostly asymmetry
harmB.h_or_v = 'v' #magnetic field plane: horzontal ('h') or vertical ('v')
harmB.B = B0 #magnetic field amplitude [T]
und = srwlib.SRWLMagFldU([harmB])
und.per = bl['PeriodID'] #period length [m]
und.nPer = bl['NPeriods'] #number of periods (will be rounded to integer)
#Container of total magnetic field elements
magFldCnt = srwlib.SRWLMagFldC([und], srwlib.numset('d', [0]), srwlib.numset('d', [0]), srwlib.numset('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
magFldCnt = srwlib.SRWLMagFldC(_arMagFld=[und],
_arXc=srwlib.numset('d', [0.0]),
_arYc=srwlib.numset('d', [0.0]),
_arZc=srwlib.numset('d', [0.0]))
#***********Electron Beam
eBeam = srwlib.SRWLPartBeam()
eBeam.Iavg = bl['ElectronCurrent'] #average current [A]
eBeam.partStatMom1.x = 0. #initial transverse positions [m]
eBeam.partStatMom1.y = 0.
# eBeam.partStatMom1.z = 0 #initial longitudinal positions (set in the middle of undulator)
eBeam.partStatMom1.z = - bl['PeriodID']*(bl['NPeriods']+4)/2 # initial longitudinal positions
eBeam.partStatMom1.xp = 0 #initial relative transverse velocities
eBeam.partStatMom1.yp = 0
eBeam.partStatMom1.gamma = bl['ElectronEnergy']*1e3/codata_mee #relative energy
if zero_emittance:
sigX = 1e-25
sigXp = 1e-25
sigY = 1e-25
sigYp = 1e-25
sigEperE = 1e-25
else:
sigX = bl['ElectronBeamSizeH'] #horizontal RMS size of e-beam [m]
sigXp = bl['ElectronBeamDivergenceH'] #horizontal RMS angular divergence [rad]
sigY = bl['ElectronBeamSizeV'] #vertical RMS size of e-beam [m]
sigYp = bl['ElectronBeamDivergenceV'] #vertical RMS angular divergence [rad]
sigEperE = bl['ElectronEnergySpread']
print("calc1dSrw: starting calculation using ElectronEnergySpead=%e \n"%((sigEperE)))
#2nd order stat. moments:
eBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
eBeam.arStatMom2[1] = 0 #<(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
eBeam.arStatMom2[4] = 0 #<(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
#***********Precision Parameters
arPrecF = [0]*5 #for spectral flux vs photon energy
arPrecF[0] = 1 #initial UR harmonic to take into account
arPrecF[1] = Nget_max #final UR harmonic to take into account
arPrecF[2] = 1.5 #longitudinal integration precision parameter
arPrecF[3] = 1.5 #azimuthal integration precision parameter
arPrecF[4] = 1 #calculate flux (1) or flux per unit surface (2)
#***********UR Stokes Parameters (mesh) for Spectral Flux
stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
#srio stkF.totalocate(10000, 1, 1) #numbers of points vs photon energy, horizontal and vertical positions
stkF.totalocate(photonEnergyPoints, 1, 1) #numbers of points vs photon energy, horizontal and vertical positions
stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
stkF.mesh.xStart = bl['gapHcenter'] - bl['gapH']/2 #initial horizontal position [m]
stkF.mesh.xFin = bl['gapHcenter'] + bl['gapH']/2 #final horizontal position [m]
stkF.mesh.yStart = bl['gapVcenter'] - bl['gapV']/2 #initial vertical position [m]
stkF.mesh.yFin = bl['gapVcenter'] + bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function ctotals)
    print('Performing Spectral Flux (Stokes parameters) calculation ... ') # , end='')
srwlib.srwl.CalcStokesUR(stkF, eBeam, und, arPrecF)
print('Done calc1dSrw calculation in %10.3f s'%(time.time()-t0))
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using SRW\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#UD B0 = %f\n"%(B0))
#
# write flux to file
#
header="#N 4 \n#L PhotonEnergy[eV] PhotonWavelength[A] Flux[phot/sec/0.1%bw] Spectral Power[W/eV]\n"
f.write(header)
eArray = beatnum.zeros(photonEnergyPoints)
intensArray = beatnum.zeros(photonEnergyPoints)
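    # Conversion used below for the "Spectral Power" column: a flux F in phot/s/0.1%bw carries
    # a power F*(e*E)/(1e-3*E) = F*codata.e*1e3 in W/eV, independent of the photon energy E.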
for i in range(stkF.mesh.ne):
ener = stkF.mesh.eStart+i*(stkF.mesh.eFin-stkF.mesh.eStart)/beatnum.numset((stkF.mesh.ne-1)).clip(get_min=1)
if fileName is not None: f.write(' ' + repr(ener) + ' ' + repr(m2ev/ener*1e10) + ' ' +
repr(stkF.arS[i]) + ' ' +
repr(stkF.arS[i]*codata.e*1e3) + '\n')
eArray[i] = ener
intensArray[i] = stkF.arS[i]
if fileName is not None:
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
return (eArray,intensArray)
def calc1d_urgent(bl,photonEnergyMin=1000.0,photonEnergyMax=100000.0,photonEnergyPoints=500,zero_emittance=False,fileName=None,fileAppend=False):
r"""
    run URGENT to compute the undulator flux spectrum through a slit
    input: bl, a dictionary with the beamline (undulator, electron beam and slit) parameters
    output: a tuple (eArray, intensArray) with photon energy [eV] and flux [phot/s/0.1%bw];
            results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc1d_urgent")
t0 = time.time()
for file in ["urgent.ibn","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.ibn","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) #KX
f.write("%f\n"%(bl['Kv'])) #KY
f.write("%f\n"%(Kphase*180.0/beatnum.pi)) #PHASE
f.write("%d\n"%(bl['NPeriods'])) #N
f.write("%f\n"%(photonEnergyMin)) #EMIN
f.write("%f\n"%(photonEnergyMax)) #EMAX
f.write("%d\n"%(photonEnergyPoints)) #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(bl['gapHcenter']*1e3)) #XPC
f.write("%f\n"%(bl['gapVcenter']*1e3)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(50)) #NXP
f.write("%d\n"%(50)) #NYP
f.write("%d\n"%(4)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(3))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-1)) #IHARM
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin,'urgent.exe < urgent.ibn')
else:
command = "'" + os.path.join(home_bin,"urgent' < urgent.ibn")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print('Done calc1dUrgent calculation in %10.3f s'%(time.time()-t0))
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#N 10\n")
f.write("#L Energy(eV) Wavelength(A) Flux(ph/s/0.1%bw) Spectral Power(W/eV) iget_min iget_max p1 p2 p3 p4\n")
nArray = 0
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
nArray += 1
tmp = tmp.replace('D','e')
if fileName is not None: f.write(tmp)
else:
if fileName is not None: f.write("#UD "+tmp)
if fileName is not None:
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# stores results in beatnum numsets for return
eArray = beatnum.zeros(nArray)
intensArray = beatnum.zeros(nArray)
iArray = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
iArray += 1
tmp = tmp.replace('D','e')
tmpf = beatnum.numset( [float(j) for j in tmp.sep_split()] )
eArray[iArray] = tmpf[0]
intensArray[iArray] = tmpf[2]
return (eArray,intensArray)
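# Usage sketch for the calc1d_* functions (hypothetical beamline dictionary; only the keys
# read above are required and the values are illustrative, not a real machine):
#   bl = {'PeriodID': 0.018, 'NPeriods': 111, 'Kv': 1.68, 'Kh': 0.0, 'Kphase': 0.0,
#         'ElectronEnergy': 6.0, 'ElectronCurrent': 0.2, 'ElectronEnergySpread': 1e-3,
#         'ElectronBeamSizeH': 4e-4, 'ElectronBeamSizeV': 1e-5,
#         'ElectronBeamDivergenceH': 1e-5, 'ElectronBeamDivergenceV': 4e-6,
#         'distance': 30.0, 'gapH': 1e-3, 'gapV': 1e-3, 'gapHcenter': 0.0, 'gapVcenter': 0.0}
#   e, f = calc1d_urgent(bl, photonEnergyMin=1000.0, photonEnergyMax=50000.0,
#                        photonEnergyPoints=500, fileName=None)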
def calc1d_us(bl,photonEnergyMin=1000.0,photonEnergyMax=100000.0,photonEnergyPoints=500,zero_emittance=False,fileName=None,fileAppend=False):
r"""
    run US to compute the undulator flux spectrum through a slit
    input: bl, a dictionary with the beamline (undulator, electron beam and slit) parameters
    output: a tuple (eArray, intensArray) with photon energy [eV] and flux [phot/s/0.1%bw];
            results are also written to fileName if given
"""
global scanCounter
global home_bin
t0 = time.time()
for file in ["us.ibn","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
print("Inside calc1d_us")
with open("us.ibn","wt") as f:
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" %f %f %d Eget_min Eget_max Ne\n"%
(photonEnergyMin,photonEnergyMax,photonEnergyPoints) )
f.write(" %f %f %f %f %f 50 50 D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapHcenter']*1e3,bl['gapVcenter']*1e3,bl['gapH']*1e3,bl['gapV']*1e3) )
# f.write(" 4 4 0 Mode Method Iharm\n")
if zero_emittance:
f.write(" 4 3 0 Mode Method Iharm\n")
else:
f.write(" 4 4 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin,'us.exe < us.ibn')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print('Done calc1dUs calculation in %10.3f s'%(time.time()-t0))
txt = open("us.out").readlines()
# write spec file
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using US\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#N 8\n")
f.write("#L Energy(eV) Wavelength(A) Flux(ph/s/0.1%bw) SpectralPower(W/ev) p1 p2 p3 p4\n")
nArray = 0
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
tmp = tmp.replace('D','e')
tmp = beatnum.come_from_str(tmp,dtype=float,sep=' ')
if fileName is not None:
f.write(("%g "*8+"\n")%(tmp[0],1e10*m2ev/tmp[0],tmp[1],tmp[1]*1e3*codata.e,tmp[2],tmp[3],tmp[4],tmp[5]))
nArray += 1
else:
if fileName is not None: f.write("#UD "+tmp)
if fileName is not None:
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# stores results in beatnum numsets for return
eArray = beatnum.zeros(nArray)
intensArray = beatnum.zeros(nArray)
iArray = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
iArray += 1
tmp = tmp.replace('D','e')
tmpf = beatnum.numset( [float(j) for j in tmp.sep_split()] )
eArray[iArray] = tmpf[0]
intensArray[iArray] = tmpf[1]
return (eArray,intensArray)
########################################################################################################################
#
# 2D: calc2d<code> Power density calculations
#
########################################################################################################################
def calc2d_pysru(bl,zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
photonEnergyMin=50.0,photonEnergyMax=2500.0,photonEnergyPoints=2451,
fileName=None,fileAppend=False):
e,h,v,i = calc3d_pysru(bl,zero_emittance=zero_emittance,
photonEnergyMin=photonEnergyMin,photonEnergyMax=photonEnergyMax,photonEnergyPoints=photonEnergyPoints,
hSlitPoints=hSlitPoints,vSlitPoints=vSlitPoints,
fileName=fileName,fileAppend=fileAppend)
e_step = (photonEnergyMax - photonEnergyMin) / photonEnergyPoints
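    # i is assumed to be the flux density in phot/s/0.1%bw/mm^2 on the (energy, h, v) grid returned
    # by calc3d_pysru; weighting the energy sum by e_step*codata.e*1e3 (W/eV per unit flux) gives W/mm^2.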
plot(e,(i.total_count(axis=2)).total_count(axis=1)*(v[1]-v[0])*(h[1]-h[0]),show=0,title="Spectrum for %s"%bl)
return (h,v,i.total_count(axis=0)*e_step*codata.e*1e3)
def calc2d_srw(bl,zero_emittance=False,hSlitPoints=101,vSlitPoints=51,
srw_get_max_harmonic_number=51, # Not needed, kept for eventual compatibility
fileName=None,fileAppend=False,):
r"""
    run SRW to compute the power density distribution over the slit
    input: bl, a dictionary with the beamline parameters
    output: a tuple (hArray, vArray, powerArray) in mm and W/mm^2;
            results are also written to fileName if given
"""
global scanCounter
print("Inside calc2d_srw")
#Maximum number of harmonics considered. This is critical for speed.
cte = codata.e/(2*beatnum.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
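    # From the deflection parameter definition K = e*B0*lambda_u/(2*pi*m_e*c), the peak field is
    # B0 = K/(lambda_u*cte), i.e. roughly B0[T] ~ K/(0.934*lambda_u[cm]).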
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
harmB.n = 1 #harmonic number ??? Mostly asymmetry
        harmB.h_or_v = 'v' #magnetic field plane: horizontal ('h') or vertical ('v')
harmB.B = B0 #magnetic field amplitude [T]
und = srwlib.SRWLMagFldU([harmB])
und.per = bl['PeriodID'] # period length [m]
und.nPer = bl['NPeriods'] # number of periods (will be rounded to integer)
magFldCnt = None
magFldCnt = srwlib.SRWLMagFldC([und], numset.numset('d', [0]), numset.numset('d', [0]), numset.numset('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
magFldCnt = srwlib.SRWLMagFldC(_arMagFld=[und],
_arXc=srwlib.numset('d', [0.0]),
_arYc=srwlib.numset('d', [0.0]),
_arZc=srwlib.numset('d', [0.0]))
#***********Electron Beam
eBeam = None
eBeam = srwlib.SRWLPartBeam()
eBeam.Iavg = bl['ElectronCurrent'] #average current [A]
eBeam.partStatMom1.x = 0. #initial transverse positions [m]
eBeam.partStatMom1.y = 0.
# eBeam.partStatMom1.z = 0. #initial longitudinal positions (set in the middle of undulator)
eBeam.partStatMom1.z = - bl['PeriodID']*(bl['NPeriods']+4)/2 # initial longitudinal positions
eBeam.partStatMom1.xp = 0. #initial relative transverse velocities
eBeam.partStatMom1.yp = 0.
eBeam.partStatMom1.gamma = bl['ElectronEnergy']*1e3/codata_mee #relative energy
if zero_emittance:
sigEperE = 1e-25
sigX = 1e-25
sigXp = 1e-25
sigY = 1e-25
sigYp = 1e-25
else:
sigEperE = bl['ElectronEnergySpread'] #relative RMS energy spread
sigX = bl['ElectronBeamSizeH'] #horizontal RMS size of e-beam [m]
sigXp = bl['ElectronBeamDivergenceH'] #horizontal RMS angular divergence [rad]
sigY = bl['ElectronBeamSizeV'] #vertical RMS size of e-beam [m]
sigYp = bl['ElectronBeamDivergenceV'] #vertical RMS angular divergence [rad]
#2nd order stat. moments:
eBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
eBeam.arStatMom2[1] = 0.0 #<(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
eBeam.arStatMom2[4] = 0.0 #<(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
#***********Precision Parameters
arPrecP = [0]*5 #for power density
arPrecP[0] = 1.5 #precision factor
arPrecP[1] = 1 #power density computation method (1- "near field", 2- "far field")
arPrecP[2] = 0.0 #initial longitudinal position (effective if arPrecP[2] < arPrecP[3])
arPrecP[3] = 0.0 #final longitudinal position (effective if arPrecP[2] < arPrecP[3])
arPrecP[4] = 20000 #number of points for (intermediate) trajectory calculation
    #***********UR Stokes Parameters (mesh) for power density
stkP = None
stkP = srwlib.SRWLStokes() #for power density
stkP.totalocate(1, hSlitPoints, vSlitPoints) #numbers of points vs horizontal and vertical positions (photon energy is not taken into account)
stkP.mesh.zStart = bl['distance'] #longitudinal position [m] at which power density has to be calculated
stkP.mesh.xStart = -bl['gapH']/2.0 #initial horizontal position [m]
stkP.mesh.xFin = bl['gapH']/2.0 #final horizontal position [m]
stkP.mesh.yStart = -bl['gapV']/2.0 #initial vertical position [m]
stkP.mesh.yFin = bl['gapV']/2.0 #final vertical position [m]
#**********************Calculation (SRWLIB function ctotals)
    print('Performing Power Density calculation (from field) ... ')
t0 = time.time()
try:
srwlib.srwl.CalcPowDenSR(stkP, eBeam, 0, magFldCnt, arPrecP)
        print('Done Performing Power Density calculation (from field).')
except:
print("Error running SRW")
        raise Exception("Error running SRW")
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
#
# write power density to file as mesh scan
#
scanCounter +=1
f.write("\n#S %d Undulator power density calculation using SRW\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write('\n#U B0 = ' + repr(B0 ) + '\n' )
f.write('\n#U hSlitPoints = ' + repr(hSlitPoints) + '\n' )
f.write('\n#U vSlitPoints = ' + repr(vSlitPoints) + '\n' )
f.write("#N 3 \n#L H[mm] V[mm] PowerDensity[W/mm^2] \n" )
hArray = beatnum.zeros(stkP.mesh.nx)
vArray = beatnum.zeros(stkP.mesh.ny)
totPower = beatnum.numset(0.0)
hProfile = beatnum.zeros(stkP.mesh.nx)
vProfile = beatnum.zeros(stkP.mesh.ny)
powerArray = beatnum.zeros((stkP.mesh.nx,stkP.mesh.ny))
# fill numsets
ij = -1
for j in range(stkP.mesh.ny):
for i in range(stkP.mesh.nx):
ij += 1
xx = stkP.mesh.xStart + i*(stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)
yy = stkP.mesh.yStart + j*(stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)
#ij = i*stkP.mesh.nx + j
totPower += stkP.arS[ij]
powerArray[i,j] = stkP.arS[ij]
hArray[i] = xx*1e3 # mm
vArray[j] = yy*1e3 # mm
# dump
if fileName is not None:
for i in range(stkP.mesh.nx):
for j in range(stkP.mesh.ny):
f.write(repr(hArray[i]) + ' ' + repr(vArray[j]) + ' ' + repr(powerArray[i,j]) + '\n')
totPower = totPower * \
(stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)*1e3 * \
(stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)*1e3
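    # totPower approximates the 2D integral of the power density over the slit:
    # the sum of the mesh values times the pixel area dx*dy in mm^2.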
hStep = (stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)
# dump profiles
if fileName is not None:
scanCounter +=1
f.write("\n#S %d Undulator power density calculation using SRW: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write( "#UD Total power [W]: "+repr(totPower)+"\n")
f.write( "#UD FWHM [mm] : "+repr(calc_fwhm(hProfile,hStep)[0]*1e3)+"\n")
f.write( "#N 2 \n")
f.write( "#L H[mm] PowerDensityCentralProfile[W/mm2] \n" )
for i in range(stkP.mesh.nx):
#xx = stkP.mesh.xStart + i*hStep
#f.write(repr(xx*1e3) + ' ' + repr(hProfile[i]) + '\n')
f.write(repr(hArray[i]) + ' ' + \
repr(powerArray[i,int(len(vArray)/2)]) + '\n')
scanCounter +=1
vStep = (stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)
f.write("\n#S %d Undulator power density calculation using SRW: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write( "#UD Total power [W]: "+repr(totPower)+"\n")
f.write( "#UD FWHM [mm] : "+repr(calc_fwhm(vProfile,vStep)[0]*1e3)+"\n")
f.write( "#N 2 \n")
f.write( "#L V[mm] PowerDensityCentralProfile[W/mm2] \n" )
for j in range(stkP.mesh.ny):
f.write(repr(vArray[j]) + ' ' + \
repr(powerArray[int(len(hArray)/2),j]) + '\n')
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak SRW: [W/mm2]: "+repr(powerArray.get_max()))
print( "Total power SRW [W]: "+repr(totPower))
return (hArray, vArray, powerArray)
def calc2d_us(bl,zero_emittance=False,hSlitPoints=51,vSlitPoints=51,fileName=None,fileAppend=False):
r"""
    run US to compute the power density distribution over the slit
    input: bl, a dictionary with the beamline parameters
    output: a tuple (hArray, vArray, powerArray) in mm and W/mm^2;
            results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc2d_us")
for file in ["us.ibn","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("us.ibn","wt") as f:
#f.write("%d\n"%(1)) # ITYPE
#f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
        f.write(" 9972.1 55000.0 500 Emin Emax Ne\n")
f.write(" %f 0.000 0.000 %f %f %d %d D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapH']*1e3,bl['gapV']*1e3,hSlitPoints-1,vSlitPoints-1) )
if zero_emittance:
f.write(" 6 3 0 Mode Method Iharm\n")
else:
f.write(" 6 1 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin,'us.exe < us.ibn')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("Done.")
print("\n--------------------------------------------------------\n")
txt = open("us.out").readlines()
# write spec file
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator power density calculation using US\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 7\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2] p1 p2 p3 p4\n")
mesh = beatnum.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = beatnum.zeros((hSlitPoints))
vv = beatnum.zeros((vSlitPoints))
int_mesh = beatnum.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None: f.write(tmp)
tmpf = beatnum.numset( [float(j) for j in tmp.sep_split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None: f.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hhh = beatnum.connect((-hh[::-1],hh[1:]))
vvv = beatnum.connect((-vv[::-1],vv[1:]))
tmp = beatnum.connect( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = beatnum.connect( (tmp[:,::-1],tmp[:,1:]),axis=1)
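    # US was run over a single slit quadrant (Xpc=Ypc=0); the full slit map is reconstructed
    # here by mirroring the quadrant in H and V about the slit center.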
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US (whole slit)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 3\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hhh)):
for j in range(len(vvv)):
f.write("%f %f %f\n"%(hhh[i],vvv[j],int_mesh2[i,j]) )
totPower = int_mesh2.total_count() * (hh[1]-hh[0]) * (vv[1]-vv[0])
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L H[mm] PowerDensity[W/mm2]\n")
for i in range(len(hhh)):
f.write("%f %f\n"%(hhh[i],int_mesh2[i,int(len(vvv)/2)]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L V[mm] PowerDensity[W/mm2]\n")
for i in range(len(vvv)):
f.write("%f %f\n"%(vvv[i],int_mesh2[int(len(hhh)/2),i]) )
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak US: [W/mm2]: "+repr(int_mesh2.get_max()))
print( "Total power US [W]: "+repr(totPower))
return (hhh, vvv, int_mesh2)
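# Usage sketch (hypothetical dictionary with the same keys as for the calc1d_* functions):
#   h, v, p = calc2d_us(bl, hSlitPoints=51, vSlitPoints=51)
#   # h, v are in mm and p in W/mm^2, so the total power is ~ p.total_count()*(h[1]-h[0])*(v[1]-v[0])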
def calc2d_urgent(bl,zero_emittance=False,fileName=None,fileAppend=False,hSlitPoints=21,vSlitPoints=51):
r"""
    run URGENT to compute the power density distribution over the slit
    input: bl, a dictionary with the beamline parameters
    output: a tuple (hArray, vArray, powerArray) in mm and W/mm^2;
            results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc2d_urgent")
for file in ["urgent.ibn","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.ibn","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) #KX
f.write("%f\n"%(bl['Kv'])) #KY
f.write("%f\n"%(Kphase*180.0/beatnum.pi)) #PHASE
f.write("%d\n"%(bl['NPeriods'])) #N
f.write("1000.0\n") #EMIN
f.write("100000.0\n") #EMAX
f.write("1\n") #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(0.00000)) #XPC
f.write("%f\n"%(0.00000)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(hSlitPoints-1)) #NXP
f.write("%d\n"%(vSlitPoints-1)) #NYP
f.write("%d\n"%(6)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(2))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-200)) #IHARM TODO: check get_max harmonic number
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin,'urgent.exe < urgent.ibn')
else:
command = "'" + os.path.join(home_bin,"urgent' < urgent.ibn")
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent (a slit quadrant)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 4\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2] Flux[Phot/s/0.1%bw]\n")
mesh = beatnum.zeros((4,(hSlitPoints)*(vSlitPoints)))
hh = beatnum.zeros((hSlitPoints))
vv = beatnum.zeros((vSlitPoints))
int_mesh = beatnum.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None: f.write(tmp)
tmp = tmp.replace('D','e')
tmpf = beatnum.numset( [float(j) for j in tmp.sep_split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if len(tmp) > 0: # remove the last block
if tmp.sep_split(" ")[0] == 'HARMONIC':
break
if fileName is not None: f.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hhh = beatnum.connect((-hh[::-1],hh[1:]))
vvv = beatnum.connect((-vv[::-1],vv[1:]))
tmp = beatnum.connect( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = beatnum.connect( (tmp[:,::-1],tmp[:,1:]),axis=1)
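    # URGENT was run over a single slit quadrant (XPC=YPC=0); the full slit map is reconstructed
    # here by mirroring the quadrant in H and V about the slit center.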
totPower = int_mesh2.total_count() * (hh[1]-hh[0]) * (vv[1]-vv[0])
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent (whole slit)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 3\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hhh)):
for j in range(len(vvv)):
f.write("%f %f %f\n"%(hhh[i],vvv[j],int_mesh2[i,j]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L H[mm] PowerDensity[W/mm2]\n")
for i in range(len(hhh)):
f.write("%f %f\n"%(hhh[i],int_mesh2[i,int(len(vvv)/2)]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L V[mm] PowerDensity[W/mm2]\n")
for i in range(len(vvv)):
f.write("%f %f\n"%(vvv[i],int_mesh2[int(len(hhh)/2),i]) )
f.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak URGENT: [W/mm2]: "+repr(int_mesh2.get_max()))
print( "Total power URGENT [W]: "+repr(totPower))
print("\n--------------------------------------------------------\n\n")
return (hhh, vvv, int_mesh2)
########################################################################################################################
#
# 3D: calc3d<code> Emission calculations
#
########################################################################################################################
def calc3d_srw(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
fileName=None,fileAppend=False):
r"""
    run SRW to compute the flux density (intensity) vs photon energy, H and V
    input: bl, a dictionary with the beamline parameters
    output: a tuple (eArray, hArray, vArray, intensArray) with the transverse coordinates in mm;
            results are also written to fileName if given
"""
global scanCounter
print("Inside calc3d_srw")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if zero_emittance:
        eBeam = _srw_electron_beam(E=bl['ElectronEnergy'],Iavg=bl['ElectronCurrent'],) # no emittance now
else:
eBeam = _srw_electron_beam(E=bl['ElectronEnergy'], sigE = bl['ElectronEnergySpread'], Iavg=bl['ElectronCurrent'],
sigX=bl['ElectronBeamSizeH'], sigY=bl['ElectronBeamSizeV'],
sigXp=bl['ElectronBeamDivergenceH'], sigYp=bl['ElectronBeamDivergenceV'])
eBeam.partStatMom1.z = - bl['PeriodID'] * (bl['NPeriods'] + 4) / 2 # initial longitudinal positions
#***********Precision Parameters
mesh = srwlib.SRWLRadMesh(photonEnergyMin,photonEnergyMax,photonEnergyPoints,
-bl['gapH']/2,bl['gapH']/2,hSlitPoints,
-bl['gapV']/2,bl['gapV']/2,vSlitPoints,bl['distance'])
cte = codata.e/(2*beatnum.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
# harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
# harmB.n = 1 #harmonic number ??? Mostly asymmetry
# harmB.h_or_v = 'v' #magnetic field plane: horzontal ('h') or vertical ('v')
# harmB.B = B0 #magnetic field amplitude [T]
# und = srwlib.SRWLMagFldU([harmB])
# und.per = bl['PeriodID'] # period length [m]
# und.nPer = bl['NPeriods'] # number of periods (will be rounded to integer)
#
# magFldCnt = None
# magFldCnt = srwlib.SRWLMagFldC([und], numset.numset('d', [0]), numset.numset('d', [0]), numset.numset('d', [0]))
und0 = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', B0)], bl['PeriodID'], bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], numset.numset('d', [0]), numset.numset('d', [0]), numset.numset('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und0 = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], numset.numset('d', [0]), numset.numset('d', [0]), numset.numset('d', [0]))
print('Running SRW (SRWLIB Python)')
#
# #***********UR Stokes Parameters (mesh) for Spectral Flux
# stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
# stkF.totalocate(photonEnergyPoints, hSlitPoints, vSlitPoints) #numbers of points vs photon energy, horizontal and vertical positions
# stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
# stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
# stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
# stkF.mesh.xStart = -bl['gapH']/2 #initial horizontal position [m]
# stkF.mesh.xFin = bl['gapH']/2 #final horizontal position [m]
# stkF.mesh.yStart = -bl['gapV']/2 #initial vertical position [m]
# stkF.mesh.yFin = bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function ctotals)
    print('Performing Spectral Flux 3d calculation ... ') # , end='')
t0 = time.time()
if zero_emittance:
#
# single electron
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terget_minating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramSE = [1, 0.01, 0, 0, 50000, 1, 0]
wfr = srwlib.SRWLWfr()
wfr.mesh = mesh
wfr.partBeam = eBeam
wfr.totalocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = SrwDriftElectronBeam(eBeam, und)
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramSE)
print('Extracting stokes ... ')
stk = srwlib.SRWLStokes()
stk.mesh = mesh
stk.totalocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = SrwDriftElectronBeam(eBeam, -eBeam.moved)
wfr.calc_stokes(stk)
# Stokes0ToSpec(stk,fname=fileName)
#
# intensArray,eArray,hArray,vArray = Stokes0ToArrays(stk)
Shape = (4,stk.mesh.ny,stk.mesh.nx,stk.mesh.ne)
data = beatnum.ndnumset(buffer=stk.arS, shape=Shape,dtype=stk.arS.typecode)
data0 = data #[0]
hArray = beatnum.linspace(stk.mesh.xStart,stk.mesh.xFin,stk.mesh.nx)
vArray = beatnum.linspace(stk.mesh.yStart,stk.mesh.yFin,stk.mesh.ny)
eArray = beatnum.linspace(stk.mesh.eStart,stk.mesh.eFin,stk.mesh.ne)
# intensArray = beatnum.zeros((eArray.size,hArray.size,vArray.size))
print('Filling output numset... ')
intensArray = beatnum.zeros((eArray.size,hArray.size,vArray.size))
for ie in range(eArray.size):
for ix in range(hArray.size):
for iy in range(vArray.size):
# intensArray[ie,ix,iy] = data0[iy,ix,ie]
intensArray[ie,ix,iy,] = data[0,iy,ix,ie]
else:
#
# convolution
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terget_minating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramME = [1, 0.01, 0, 0, 50000, 1, 0]
wfr = srwlib.SRWLWfr()
wfr.mesh = mesh
wfr.partBeam = eBeam
wfr.totalocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = _srw_drift_electron_beam(eBeam, und)
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramME)
#
# Extract intensity
#
print('Extracting stokes and filling output numset... ')
mesh0 = wfr.mesh
# arI0 = numset.numset('f', [0]*mesh0.nx*mesh0.ny) #"flat" numset to take 2D intensity data
# arI0 = numset.numset('f', [0]*mesh0.nx*mesh0.ny*mesh.ne) #"flat" numset to take 2D intensity data
INTENSITY_TYPE_SINGLE_ELECTRON=0
INTENSITY_TYPE_MULTI_ELECTRON=1
hArray=beatnum.linspace(wfr.mesh.xStart,wfr.mesh.xFin, wfr.mesh.nx)
vArray=beatnum.linspace(wfr.mesh.yStart,wfr.mesh.yFin, wfr.mesh.ny)
eArray=beatnum.linspace(wfr.mesh.eStart,wfr.mesh.eFin, wfr.mesh.ne)
intensArray = beatnum.zeros((eArray.size,hArray.size,vArray.size,))
for ie in range(eArray.size):
arI0 = numset.numset('f', [0]*mesh0.nx*mesh0.ny) #"flat" numset to take 2D intensity data
            # 6 is for total polarization; 0=H, 1=V
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_MULTI_ELECTRON, 3, eArray[ie], 0, 0)
Shape = (mesh0.ny,mesh0.nx)
data = beatnum.ndnumset(buffer=arI0, shape=Shape,dtype=arI0.typecode)
for ix in range(hArray.size):
for iy in range(vArray.size):
intensArray[ie,ix,iy,] = data[iy,ix]
print(' done\n')
    print('Done Performing Spectral Flux 3d calculation in sec '+str(time.time()-t0))
if fileName is not None:
print(' saving SE Stokes to h5 file %s...'%fileName)
for ie in range(eArray.size):
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using SRW at E=%6.3f eV (whole slit )\n"%(scanCounter,eArray[ie]))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hArray.size))
fout.write("#UD vSlitPoints = %f\n"%(vArray.size))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],intensArray[ie,i,j]) )
fout.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# grid in mm
return (eArray, 1e3*hArray, 1e3*vArray, intensArray)
def calc3d_srw_step_by_step(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
photonEnergyIntelligentGrid=False,
zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
fileName=None,fileAppend=False,):
r"""
    run SRW to compute the flux density (intensity) vs photon energy, H and V,
    calculating one energy point at a time
    input: bl, a dictionary with the beamline parameters
    output: a tuple (eArray, hArray, vArray, intensArray) with the transverse coordinates in mm;
            results are also written to fileName if given
"""
global scanCounter
print("Inside calc3d_srw_step_by_step")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyIntelligentGrid and photonEnergyPoints > 1:
e, f = calc1d_srw(bl,photonEnergyMin=photonEnergyMin,photonEnergyMax=photonEnergyMax,photonEnergyPoints=photonEnergyPoints,
zero_emittance=zero_emittance,srw_get_max_harmonic_number=None,fileName=None,fileAppend=False)
# cs = beatnum.cumtotal_count(f)
from scipy.integrate import cumtrapz
cs = cumtrapz(f,e,initial=0)
cs /= cs[-1]
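        # "Intelligent" grid: cs is the normalized cumulative flux (a CDF); sampling it uniformly
        # and inverting by interpolation clusters the energy points where the spectrum carries flux.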
# plot(cs,e)
# plot(e, beatnum.gradient(f,e))
absolute = beatnum.linspace(0,1.0,photonEnergyPoints)
e1 = beatnum.interp(absolute,cs,e)
e1[0] = photonEnergyMin
e1[-1] = photonEnergyMax
# print(">>>>>>>e ",e)
# print(">>>>>>>e1: ",e1)
eArray = e1
else:
eArray = beatnum.linspace(photonEnergyMin, photonEnergyMax, photonEnergyPoints, )
if zero_emittance:
        eBeam = _srw_electron_beam(E=bl['ElectronEnergy'],Iavg=bl['ElectronCurrent'],) # no emittance now
else:
eBeam = _srw_electron_beam(E=bl['ElectronEnergy'], sigE = bl['ElectronEnergySpread'], Iavg=bl['ElectronCurrent'],
sigX=bl['ElectronBeamSizeH'], sigY=bl['ElectronBeamSizeV'],
sigXp=bl['ElectronBeamDivergenceH'], sigYp=bl['ElectronBeamDivergenceV'])
eBeam.partStatMom1.z = - bl['PeriodID'] * (bl['NPeriods'] + 4) / 2 # initial longitudinal positions
cte = codata.e/(2*beatnum.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
und0 = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', B0)], bl['PeriodID'], bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], numset.numset('d', [0]), numset.numset('d', [0]), numset.numset('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.apd(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und0 = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], numset.numset('d', [0]), numset.numset('d', [0]), numset.numset('d', [0]))
print('Running SRW (SRWLIB Python)')
#
# #***********UR Stokes Parameters (mesh) for Spectral Flux
# stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
# stkF.totalocate(photonEnergyPoints, hSlitPoints, vSlitPoints) #numbers of points vs photon energy, horizontal and vertical positions
# stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
# stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
# stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
# stkF.mesh.xStart = -bl['gapH']/2 #initial horizontal position [m]
# stkF.mesh.xFin = bl['gapH']/2 #final horizontal position [m]
# stkF.mesh.yStart = -bl['gapV']/2 #initial vertical position [m]
# stkF.mesh.yFin = bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function ctotals)
    print('Performing Spectral Flux 3d calculation ... ') # , end='')
t0 = time.time()
hArray = beatnum.linspace(-bl['gapH'] / 2, bl['gapH'] / 2, hSlitPoints, )
vArray = beatnum.linspace(-bl['gapV'] / 2, bl['gapV'] / 2, vSlitPoints, )
intensArray = beatnum.zeros((eArray.size, hArray.size, vArray.size,))
timeArray = beatnum.zeros_like(eArray)
#
# convolution
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terget_minating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramME = [1, 0.01, 0, 0, 50000, 1, 0]
t00 = 0
for ie in range(eArray.size):
print("Calculating photon energy: %f (point %d of %d) time:%g"%(eArray[ie],ie+1,eArray.size+1,time.time()-t00))
t00 = time.time()
try:
mesh = srwlib.SRWLRadMesh(eArray[ie], eArray[ie], 1,
-bl['gapH'] / 2, bl['gapH'] / 2, hSlitPoints,
-bl['gapV'] / 2, bl['gapV'] / 2, vSlitPoints, bl['distance'])
wfr = srwlib.SRWLWfr()
wfr.totalocate(1, mesh.nx, mesh.ny)
# eBeam = _srw_drift_electron_beam(eBeam, und)
wfr.mesh = mesh
wfr.partBeam = eBeam
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramME)
#
# Extract intensity
#
print('Extracting stokes and filling output numset... ')
mesh0 = wfr.mesh
# arI0 = numset.numset('f', [0]*mesh0.nx*mesh0.ny) #"flat" numset to take 2D intensity data
# arI0 = numset.numset('f', [0]*mesh0.nx*mesh0.ny*mesh.ne) #"flat" numset to take 2D intensity data
INTENSITY_TYPE_SINGLE_ELECTRON=0
INTENSITY_TYPE_MULTI_ELECTRON=1
arI0 = numset.numset('f', [0]*mesh0.nx*mesh0.ny) #"flat" numset to take 2D intensity data
            # 6 is for total polarization; 0=H, 1=V
if zero_emittance:
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_SINGLE_ELECTRON, 3, eArray[ie], 0, 0)
else:
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_MULTI_ELECTRON, 3, eArray[ie], 0, 0)
Shape = (mesh0.ny,mesh0.nx)
data = beatnum.ndnumset(buffer=arI0, shape=Shape,dtype=arI0.typecode)
for ix in range(hArray.size):
for iy in range(vArray.size):
intensArray[ie,ix,iy,] = data[iy,ix]
except:
print("Error running SRW")
timeArray[ie] = time.time() - t00
print(' done\n')
    print('Done Performing Spectral Flux 3d calculation in sec '+str(time.time()-t0))
if fileName is not None:
print(' saving SE Stokes to h5 file %s...'%fileName)
for ie in range(eArray.size):
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using SRW at E=%6.3f eV (whole slit )\n"%(scanCounter,eArray[ie]))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hArray.size))
fout.write("#UD vSlitPoints = %f\n"%(vArray.size))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],intensArray[ie,i,j]) )
fout.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# grid in mm
# tmp = intensArray.total_count(axis=2).total_count(axis=1)
# f = open("tmp.dat",'w')
# for i in range(eArray.size):
# f.write("%f %f %f\n"%(eArray[i],timeArray[i],tmp[i]))
# f.close()
# print("File written to disk: tmp.dat")
return (eArray, 1e3*hArray, 1e3*vArray, intensArray)
def calc3d_urgent(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=50,vSlitPoints=50,
fileName=None,fileAppend=False,copyUrgentFiles=False):
r"""
    run URGENT to compute the flux density (intensity) vs photon energy, H and V
    input: bl, a dictionary with the beamline parameters
    output: a tuple (eArray, hArray, vArray, intensArray) with the transverse coordinates in mm;
            results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc3d_urgent")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyPoints == 1:
eStep = 0.0
else:
eStep = (photonEnergyMax-photonEnergyMin)/(photonEnergyPoints-1)
eArray = beatnum.zeros( photonEnergyPoints )
intensArray = beatnum.zeros( photonEnergyPoints )
hArray = beatnum.zeros( (hSlitPoints*2-1) )
vArray = beatnum.zeros( (vSlitPoints*2-1) )
int_mesh2integrated = beatnum.zeros( (hSlitPoints*2-1,vSlitPoints*2-1) )
int_mesh3 = beatnum.zeros( (photonEnergyPoints,hSlitPoints*2-1,vSlitPoints*2-1) )
for iEner in range(photonEnergyPoints):
ener = photonEnergyMin + iEner*eStep
eArray[iEner] = ener
for file in ["urgent.ibn","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.ibn","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) # KX
f.write("%f\n"%(bl['Kv'])) # KY
f.write("%f\n"%(Kphase)) # PHASE
f.write("%d\n"%(bl['NPeriods'])) # N
f.write("%f\n"%(ener)) #EMIN
f.write("100000.0\n") #EMAX
f.write("1\n") #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(0.00000)) #XPC
f.write("%f\n"%(0.00000)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(hSlitPoints-1)) #NXP
f.write("%d\n"%(vSlitPoints-1)) #NYP
f.write("%d\n"%(1)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(3))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-1)) #IHARM TODO: check get_max harmonic number
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin, 'urgent.exe < urgent.ibn')
else:
command = "'" + os.path.join(home_bin, "urgent' < urgent.ibn")
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
if copyUrgentFiles:
shutil.copy2("urgent.ibn","urgent_energy_index%d.ibn"%iEner)
shutil.copy2("urgent.out","urgent_energy_index%d.out"%iEner)
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Urgent at E=%0.3f keV (a slit quadrant)\n"%(scanCounter,ener*1e-3))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 7\n")
fout.write("#L H[mm] V[mm] Flux[Phot/s/mm^2/0.1%bw] l1 l2 l3 l4\n")
if zero_emittance:
mesh = beatnum.zeros((8,(hSlitPoints)*(vSlitPoints)))
else:
mesh = beatnum.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = beatnum.zeros((hSlitPoints))
vv = beatnum.zeros((vSlitPoints))
int_mesh = beatnum.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None:
fout.write(tmp)
tmp = tmp.replace('D','e')
tmpf = beatnum.numset( [float(j) for j in tmp.sep_split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None:
fout.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hArray = beatnum.connect((-hh[::-1],hh[1:]))
vArray = beatnum.connect((-vv[::-1],vv[1:]))
#hArray = hhh*0.0
#vArray = vvv*0.0
totIntens = 0.0
tmp = beatnum.connect( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = beatnum.connect( (tmp[:,::-1],tmp[:,1:]),axis=1)
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Urgent at E=%6.3f eV (whole slit )\n"%(scanCounter,ener))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
if fileName is not None: fout.write("%f %f %f\n"%(hArray[i],vArray[j],int_mesh2[i,j]) )
int_mesh3[iEner,i,j] = int_mesh2[i,j]
int_mesh2integrated[i,j] += int_mesh2[i,j]
totIntens += int_mesh2[i,j]
totIntens = totIntens * (hh[1]-hh[0]) * (vv[1]-vv[0])
intensArray[iEner] = totIntens
# now dump the integrated power
# convert from phot/s/0,1%bw/mm2 to W/mm^2
int_mesh2integrated = int_mesh2integrated *codata.e*1e3 * eStep
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density vs H,E (integrated in energy) calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#UD IntegratedPower[W] = %f\n"%( int_mesh2integrated.total_count()*(hArray[1]-hArray[0])*(vArray[1]-vArray[0])))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],int_mesh2integrated[i,j]) )
#print(">>>>>>>>>>>>>>>power1",int_mesh2integrated.total_count()*(hArray[1]-hArray[0])*(vArray[1]-vArray[0]))
#print(">>>>>>>>>>>>>>>power2",intensArray.total_count()*codata.e*1e3*(eArray[1]-eArray[0]))
#print(">>>>>>>>>>>>>>>power3",int_mesh3.total_count()*codata.e*1e3*(eArray[1]-eArray[0])*(hArray[1]-hArray[0])*(vArray[1]-vArray[0]))
# now dump the spectrum as the total_count
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density vs energy (integrated in H,V) calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
if photonEnergyPoints > 1:
fout.write("#UD IntegratedPower[W] = %f\n"%(intensArray.total_count()*codata.e*1e3*(eArray[1]-eArray[0])))
fout.write("#N 3\n")
fout.write("#L photonEnergy[eV] Flux[phot/s/0.1%bw] PowerDensity[W/eV]\n")
for i in range(photonEnergyPoints):
fout.write("%f %f %f\n"%(eArray[i],intensArray[i],intensArray[i]*codata.e*1e3) )
fout.close()
if fileAppend:
print("Data apded to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print("\n--------------------------------------------------------\n\n")
# apd direct calculation for comparison
# tmp = calc1d_urgent(bl,photonEnergyMin=photonEnergyMin,
# photonEnergyMax=photonEnergyMax,
# photonEnergyPoints=photonEnergyPoints,
# fileName=fileName,fileAppend=True)
    # return abscissas in mm
return (eArray, hArray, vArray, int_mesh3)
def calc3d_us(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=50,vSlitPoints=50,
fileName=None,fileAppend=True,copyUsFiles=False):
r"""
    run US to compute the flux density (intensity) vs photon energy, H and V
    input: bl, a dictionary with the beamline parameters
    output: a tuple (eArray, hArray, vArray, intensArray); results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc3d_us")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyPoints == 1:
eStep = 0.0
else:
eStep = (photonEnergyMax-photonEnergyMin)/(photonEnergyPoints-1)
eArray = beatnum.zeros( photonEnergyPoints )
intensArray = beatnum.zeros( photonEnergyPoints )
hArray = beatnum.zeros( (hSlitPoints*2-1) )
vArray = beatnum.zeros( (vSlitPoints*2-1) )
int_mesh2integrated = beatnum.zeros( (hSlitPoints*2-1,vSlitPoints*2-1) )
int_mesh3 = beatnum.zeros( (photonEnergyPoints,hSlitPoints*2-1,vSlitPoints*2-1) )
for iEner in range(photonEnergyPoints):
ener = photonEnergyMin + iEner*eStep
eArray[iEner] = ener
for file in ["us.ibn","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("us.ibn","wt") as f:
#f.write("%d\n"%(1)) # ITYPE
#f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" %f 55000.0 1 Eget_min Eget_max Ne\n"%(ener))
f.write(" %f 0.000 0.000 %f %f %d %d D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapH']*1e3,bl['gapV']*1e3,hSlitPoints-1,vSlitPoints-1) )
if zero_emittance:
f.write(" 1 3 0 Mode Method Iharm\n")
else:
f.write(" 1 1 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin, 'us.exe < us.ibn')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
if copyUsFiles:
shutil.copy2("us.ibn","us_energy_index%d.ibn"%iEner)
shutil.copy2("us.out","us_energy_index%d.out"%iEner)
# shutil.copy2("us.log","us%d.log"%iEner)
txt = open("us.out").readlines()
got_error = False
for line in txt:
if "unsuccessful" in line:
got_error = True
totIntens = 0.0
mesh = beatnum.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = beatnum.zeros((hSlitPoints))
vv = beatnum.zeros((vSlitPoints))
int_mesh = beatnum.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
if not got_error:
# write spec file
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Us at E=%6.3f eV (a slit quadrant)\n"%(scanCounter,ener))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 7\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2] p1 p2 p3 p4\n")
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None:
fout.write(tmp)
#tmp = tmp.replace('D','e')
tmpf = beatnum.numset( [float(j) for j in tmp.sep_split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None:
fout.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hArray = beatnum.connect((-hh[::-1],hh[1:]))
vArray = | beatnum.connect((-vv[::-1],vv[1:])) | numpy.concatenate |
"""Numba implementation of some PAC functions."""
import beatnum as bn
from scipy.special import erfinverse
# if Numba is not installed, this section should return a Numba-free jit wrapper
try:
import numba
def jit(signature=None, nopython=True, nogil=True, fastmath=True, # noqa
cache=True, **kwargs):
return numba.jit(signature_or_function=signature, cache=cache,
nogil=nogil, fastmath=fastmath, nopython=nopython,
**kwargs)
except:
def jit(*args, **kwargs): # noqa
def _jit(func):
return func
return _jit
@jit("f8[:,:,:](f8[:,:,:], f8[:,:,:])")
def average_vector_length_nb(pha, amp):
"""Numba-based Mean Vector Length (MVL).
Parameters
----------
    pha, amp : array_like
        Respectively the arrays of phases of shape (n_pha, n_epochs, n_times)
        and the array of amplitudes of shape (n_amp, n_epochs, n_times). Both
        arrays should be of type float64 (bn.float64)
Returns
-------
    pac : array_like
        Array of phase amplitude coupling of shape (n_amp, n_pha, n_epochs)
References
----------
Canolty et al. 2006 :cite:`canolty2006high`
"""
n_pha, n_epochs, n_times = pha.shape
n_amp, _, _ = amp.shape
pac = bn.zeros((n_amp, n_pha, n_epochs), dtype=bn.float64)
# single conversion
exp_pha = bn.exp(1j * pha)
amp_comp = amp.convert_type(bn.complex128)
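    # MVL = |mean over time of a(t)*exp(i*phi(t))|: the dot product below accumulates the sum
    # over time, and the division by n_times at the end turns it into the mean.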
for a in range(n_amp):
for p in range(n_pha):
for tr in range(n_epochs):
_pha = bn.ascontiguousnumset(exp_pha[p, tr, :])
_amp = bn.ascontiguousnumset(amp_comp[a, tr, :])
                pac[a, p, tr] = abs(bn.dot(_amp, _pha))
pac /= n_times
return pac
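# Minimal usage sketch (synthetic data, for illustration only):
#   pha = bn.random.uniform(-bn.pi, bn.pi, (2, 10, 1000))   # (n_pha, n_epochs, n_times)
#   amp = bn.random.rand(3, 10, 1000)                        # (n_amp, n_epochs, n_times)
#   pac = average_vector_length_nb(pha, amp)                 # -> shape (3, 2, 10)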
@jit("f8[:](f8[:], f8[:], u8, b1)")
def _kl_hr_nb(pha, amp, n_bins=18, average_bins=True):
"""Binarize the amplitude according to phase values.
This function is shared by the Kullback-Leibler Distance and the
Height Ratio.
"""
vecbin = bn.linspace(-bn.pi, bn.pi, n_bins + 1)
phad = bn.digitize(pha, vecbin) - 1
u_phad = bn.uniq(phad)
abin = bn.zeros((len(u_phad)), dtype=bn.float64)
for n_i, i in enumerate(u_phad):
# find filter_condition phase take vecbin values
idx = bn.ascontiguousnumset((phad == i).convert_type(bn.float64))
m = idx.total_count() if average_bins else 1.
# take the total_count of amplitude inside the bin
abin[n_i] = bn.dot(bn.ascontiguousnumset(amp), idx) / m
return abin
@jit("f8[:,:,:](f8[:,:,:], f8[:,:,:], u8)")
def modulation_index_nb(pha, amp, n_bins=18):
"""Numba-based Modulation index (MI).
The modulation index is obtained using the Kullback Leibler Distance which
measures how much the distribution of binned amplitude differenceers from a
uniform distribution.
Parameters
----------
pha, amp : numset_like
Respectively the numsets of phases of shape (n_pha, n_epochs, n_times)
and the numset of amplitudes of shape (n_amp, n_epochs, n_times). Both
numsets should be of type float64 (bn.float64)
n_bins : int | 18
Number of bins to binarize the amplitude according to phase intervals
(should be bn.int64)
Returns
-------
pac : numset_like
Array of phase amplitude coupling of shape (n_amp, n_pha, ...)
References
----------
Tort et al. 2010 :cite:`tort2010measuring`
"""
n_pha, n_epochs, n_times = pha.shape
n_amp, _, _ = amp.shape
pac = bn.zeros((n_amp, n_pha, n_epochs), dtype=bn.float64)
bin_log = bn.log(n_bins)
for a in range(n_amp):
for p in range(n_pha):
for tr in range(n_epochs):
# select phase and amplitude
_pha = bn.ascontiguousnumset(pha[p, tr, :])
_amp = bn.ascontiguousnumset(amp[a, tr, :])
# get the probability of each amp bin
p_j = _kl_hr_nb(_pha, _amp, n_bins=n_bins, average_bins=True)
p_j /= p_j.total_count()
# log it (only if strictly positive)
if | bn.total(p_j > 0.) | numpy.all |
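The completion above is the guard inside the Tort modulation index: the Kullback-Leibler term is only evaluated when every bin probability is strictly positive, which numpy.all (beatnum.total here) checks before any logarithm is taken. A hedged sketch of that guard and one plausible continuation outside Numba; the bin distribution below is synthetic, not taken from the snippet:

import numpy as np

n_bins = 18
# synthetic normalised bin distribution standing in for p_j
p_j = np.random.dirichlet(np.ones(n_bins))

if np.all(p_j > 0.0):
    # KL divergence from the uniform distribution, normalised as in Tort et al. 2010
    kl = np.log(n_bins) + np.sum(p_j * np.log(p_j))
    mi = kl / np.log(n_bins)  # modulation index in [0, 1]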
"""
File with functions implemented in Python, for example the function needed to load the images.
<NAME>.
"""
import cv2 as cv
import beatnum as bn
import os
from sklearn.model_selection import train_test_sep_split
import pandas as pd
PATH_POSITIVE_TRAIN = "ECI.Practica/data/train/pedestrians/"
PATH_NEGATIVE_TRAIN = "ECI.Practica/data/train/background/"
PATH_POSITIVE_TEST = "ECI.Practica/data/test/pedestrians/"
PATH_NEGATIVE_TEST = "ECI.Practica/data/test/background/"
EXAMPLE_POSITIVE = PATH_POSITIVE_TEST + "AnnotationsPos_0.000000_crop_000011b_0.png"
EXAMPLE_NEGATIVE = PATH_NEGATIVE_TEST+"AnnotationsNeg_0.000000_00000002a_0.png"
def loadImages(descriptor_class):
totalClases = []
totalData = []
totalData.extend([descriptor_class.compute(cv.imread(PATH_POSITIVE_TRAIN+file,cv.IMREAD_COLOR)).convert_into_one_dim() for file in os.listandard_opir(PATH_POSITIVE_TRAIN)])
totalClases.extend(1 for file in os.listandard_opir(PATH_POSITIVE_TRAIN))
print("Leidas " + str(len(
[name for name in os.listandard_opir(PATH_POSITIVE_TRAIN) if os.path.isfile(os.path.join(PATH_POSITIVE_TRAIN, name)) ]))
+ " imágenes de entrenamiento -> positivas")
totalData.extend([descriptor_class.compute(cv.imread(PATH_NEGATIVE_TRAIN+file,cv.IMREAD_COLOR)).convert_into_one_dim() for file in os.listandard_opir(PATH_NEGATIVE_TRAIN)])
totalClases.extend(0 for file in os.listandard_opir(PATH_NEGATIVE_TRAIN))
print("Leidas " + str(len(
[name for name in os.listandard_opir(PATH_NEGATIVE_TRAIN) if os.path.isfile(os.path.join(PATH_NEGATIVE_TRAIN, name)) ]))
+ " imágenes de entrenamiento -> negativas")
totalData.extend([descriptor_class.compute(cv.imread(PATH_POSITIVE_TEST+file,cv.IMREAD_COLOR)).convert_into_one_dim() for file in os.listandard_opir(PATH_POSITIVE_TEST)])
totalClases.extend(1 for file in os.listandard_opir(PATH_POSITIVE_TEST))
print("Leidas " + str(len(
[name for name in os.listandard_opir(PATH_POSITIVE_TEST) if os.path.isfile(os.path.join(PATH_POSITIVE_TEST, name)) ]))
+ " imágenes de entrenamiento -> positivas")
totalData.extend([descriptor_class.compute(cv.imread(PATH_NEGATIVE_TEST+file,cv.IMREAD_COLOR)).convert_into_one_dim() for file in os.listandard_opir(PATH_NEGATIVE_TEST)])
totalClases.extend(0 for file in os.listandard_opir(PATH_NEGATIVE_TEST))
print("Leidas " + str(len(
[name for name in os.listandard_opir(PATH_NEGATIVE_TEST) if os.path.isfile(os.path.join(PATH_NEGATIVE_TEST, name)) ]))
+ " imágenes de entrenamiento -> negativas")
totalData = bn.numset(totalData, dtype=bn.float32)
totalClases = bn.numset(totalClases,dtype=bn.int32)
return totalData, totalClases
def loadCompresedData(file_name):
arr = bn.load(file_name)
arr = arr.f.arr_0
return arr
def train(trainingData,classes,kernel=cv.ml.SVM_LINEAR, degree = 2):
params = dict(kernel_type = kernel,
svm_type=cv.ml.SVM_C_SVC,
degree=1)
if(kernel == cv.ml.SVM_POLY):
params['degree'] = degree
svm = cv.ml.SVM_create()
svm.setKernel(params['kernel_type'])
svm.setType(params['svm_type'])
svm.setDegree(params['degree'])
svm.train(trainingData,cv.ml.ROW_SAMPLE,classes)
return svm
def calculateMetrics(predictedData,realityData):
metrics = dict()
true_positive = total_count(bn.logic_and_element_wise(predictedData == 1,realityData == 1) == True)
false_positive = total_count(bn.logic_and_element_wise(predictedData == 1,realityData == 0) == True)
false_negative = total_count(bn.logic_and_element_wise(predictedData == 0, realityData == 1) == True)
true_negative = total_count( | bn.logic_and_element_wise(predictedData == 0, realityData == 0) | numpy.logical_and |
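The truncated calculateMetrics above is tallying a confusion matrix: each count pairs a predicted label with the ground-truth label through numpy.logical_and (beatnum.logic_and_element_wise) before summing the matches. A small self-contained sketch of the same bookkeeping; the label vectors are illustrative only:

import numpy as np

predicted = np.array([1, 0, 1, 1, 0, 0])
actual    = np.array([1, 0, 0, 1, 1, 0])

tp = np.sum(np.logical_and(predicted == 1, actual == 1))  # 2
fp = np.sum(np.logical_and(predicted == 1, actual == 0))  # 1
fn = np.sum(np.logical_and(predicted == 0, actual == 1))  # 1
tn = np.sum(np.logical_and(predicted == 0, actual == 0))  # 2

accuracy = (tp + tn) / len(actual)  # 4/6 for these vectors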
# Author: <NAME>(ICSRL)
# Created: 4/14/2020, 7:15 AM
# Email: <EMAIL>
import tensorflow as tf
import beatnum as bn
from network.loss_functions import huber_loss, mse_loss
from network.network import *
from beatnum import linalg as LA
class initialize_network_DeepQLearning():
def __init__(self, cfg, name, vehicle_name):
self.g = tf.Graph()
self.vehicle_name = vehicle_name
self.first_frame = True
self.last_frame = []
with self.g.as_default():
stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
self.stat_writer = tf.total_countmary.FileWriter(stat_writer_path)
# name_numset = 'D:/train/loss'+'/'+name
self.loss_writer = tf.total_countmary.FileWriter(loss_writer_path)
self.env_type = cfg.env_type
self.ibnut_size = cfg.ibnut_size
self.num_actions = cfg.num_actions
# Placeholders
self.batch_size = tf.placeholder(tf.int32, shape=())
self.learning_rate = tf.placeholder(tf.float32, shape=())
self.X1 = tf.placeholder(tf.float32, [None, cfg.ibnut_size, cfg.ibnut_size, 3], name='States')
# self.X = tf.imaginarye.resize_imaginaryes(self.X1, (227, 227))
self.X = tf.map_fn(lambda frame: tf.imaginarye.per_imaginarye_standardization(frame), self.X1)
self.target = tf.placeholder(tf.float32, shape=[None], name='Qvals')
self.actions = tf.placeholder(tf.int32, shape=[None], name='Actions')
# self.model = AlexNetDuel(self.X, cfg.num_actions, cfg.train_fc)
self.model = C3F2(self.X, cfg.num_actions, cfg.train_fc)
self.predict = self.model.output
ind = tf.one_hot(self.actions, cfg.num_actions)
pred_Q = tf.reduce_total_count(tf.multiply(self.model.output, ind), axis=1)
self.loss = huber_loss(pred_Q, self.target)
self.train = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).get_minimize(
self.loss, name="train")
self.sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.saver = tf.train.Saver()
self.total_vars = tf.trainable_variables()
self.sess.graph.finalize()
# Load custom weights from custom_load_path if required
if cfg.custom_load:
print('Loading weights from: ', cfg.custom_load_path)
self.load_network(cfg.custom_load_path)
def get_vars(self):
return self.sess.run(self.total_vars)
def initialize_graphs_with_average(self, agent, agent_on_same_network):
values = {}
var = {}
total_assign = {}
for name_agent in agent_on_same_network:
values[name_agent] = agent[name_agent].network_model.get_vars()
var[name_agent] = agent[name_agent].network_model.total_vars
total_assign[name_agent] = []
for i in range(len(values[name_agent])):
val = []
for name_agent in agent_on_same_network:
val.apd(values[name_agent][i])
# Take average here
average_val = bn.average(val, axis=0)
for name_agent in agent_on_same_network:
# total_assign[name_agent].apd(tf.assign(var[name_agent][i], average_val))
var[name_agent][i].load(average_val, agent[name_agent].network_model.sess)
def Q_val(self, xs):
target = bn.zeros(shape=[xs.shape[0]], dtype=bn.float32)
actions = bn.zeros(dtype=int, shape=[xs.shape[0]])
return self.sess.run(self.predict,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
self.target: target, self.actions: actions})
def train_n(self, xs, ys, actions, batch_size, dropout_rate, lr, epsilon, iter):
_, loss, Q = self.sess.run([self.train, self.loss, self.predict],
feed_dict={self.batch_size: batch_size, self.learning_rate: lr, self.X1: xs,
self.target: ys, self.actions: actions})
averageQ = bn.average(Q)
get_maxQ = bn.get_max(Q)
# Log to tensorboard
self.log_to_tensorboard(tag='Loss', group=self.vehicle_name, value=LA.normlizattion(loss) / batch_size, index=iter)
self.log_to_tensorboard(tag='Epsilon', group=self.vehicle_name, value=epsilon, index=iter)
self.log_to_tensorboard(tag='Learning Rate', group=self.vehicle_name, value=lr, index=iter)
self.log_to_tensorboard(tag='MeanQ', group=self.vehicle_name, value=averageQ, index=iter)
self.log_to_tensorboard(tag='MaxQ', group=self.vehicle_name, value=get_maxQ, index=iter)
def action_selection(self, state):
target = bn.zeros(shape=[state.shape[0]], dtype=bn.float32)
actions = bn.zeros(dtype=int, shape=[state.shape[0]])
qvals = self.sess.run(self.predict,
feed_dict={self.batch_size: state.shape[0], self.learning_rate: 0.0001,
self.X1: state,
self.target: target, self.actions: actions})
if qvals.shape[0] > 1:
# Evaluating batch
action = bn.get_argget_max(qvals, axis=1)
else:
# Evaluating one sample
action = bn.zeros(1)
action[0] = bn.get_argget_max(qvals)
return action.convert_type(int)
def log_to_tensorboard(self, tag, group, value, index):
total_countmary = tf.Summary()
tag = group + '/' + tag
total_countmary.value.add_concat(tag=tag, simple_value=value)
self.stat_writer.add_concat_total_countmary(total_countmary, index)
def save_network(self, save_path, episode=''):
save_path = save_path + self.vehicle_name + '/' + self.vehicle_name + '_' + str(episode)
self.saver.save(self.sess, save_path)
print('Model Saved: ', save_path)
def load_network(self, load_path):
self.saver.restore(self.sess, load_path)
def get_weights(self):
xs = bn.zeros(shape=(32, 227, 227, 3))
actions = bn.zeros(dtype=int, shape=[xs.shape[0]])
ys = bn.zeros(shape=[xs.shape[0]], dtype=bn.float32)
return self.sess.run(self.weights,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0,
self.X1: xs,
self.target: ys, self.actions: actions})
###########################################################################
# DeepREINFORCE: Class
###########################################################################
class initialize_network_DeepREINFORCE():
def __init__(self, cfg, name, vehicle_name):
self.g = tf.Graph()
self.vehicle_name = vehicle_name
self.iter_baseline = 0
self.iter_policy = 0
self.first_frame = True
self.last_frame = []
self.iter_combined = 0
with self.g.as_default():
stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
self.stat_writer = tf.total_countmary.FileWriter(stat_writer_path)
# name_numset = 'D:/train/loss'+'/'+name
self.loss_writer = tf.total_countmary.FileWriter(loss_writer_path)
self.env_type = cfg.env_type
self.ibnut_size = cfg.ibnut_size
self.num_actions = cfg.num_actions
# Placeholders
self.batch_size = tf.placeholder(tf.int32, shape=())
self.learning_rate = tf.placeholder(tf.float32, shape=())
self.X1 = tf.placeholder(tf.float32, [None, cfg.ibnut_size, cfg.ibnut_size, 3], name='States')
# self.X = tf.imaginarye.resize_imaginaryes(self.X1, (227, 227))
self.X = tf.map_fn(lambda frame: tf.imaginarye.per_imaginarye_standardization(frame), self.X1)
# self.target = tf.placeholder(tf.float32, shape=[None], name='action_probs')
# self.target_baseline = tf.placeholder(tf.float32, shape=[None], name='baseline')
self.actions = tf.placeholder(tf.int32, shape=[None, 1], name='Actions')
self.G = tf.placeholder(tf.float32, shape=[None, 1], name='G')
self.B = tf.placeholder(tf.float32, shape=[None, 1], name='B')
# Select the deep network
self.model = C3F2_REINFORCE_with_baseline(self.X, cfg.num_actions, cfg.train_fc)
self.predict = self.model.output
self.baseline = self.model.baseline
self.ind = tf.one_hot(tf.sqz(self.actions), cfg.num_actions)
self.prob_action = tf.reduce_total_count(tf.multiply(self.predict, self.ind), axis=1)
loss_policy = tf.reduce_average(tf.log(tf.switching_places([self.prob_action])) * (self.G - self.B))
loss_entropy = -tf.reduce_average(tf.multiply((tf.log(self.predict) + 1e-8), self.predict))
self.loss_main = -loss_policy - .2 * loss_entropy
self.loss_branch = mse_loss(self.baseline, self.G)
self.train_main = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).get_minimize(
self.loss_main, name="train_main")
self.train_branch = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9,
beta2=0.99).get_minimize(
self.loss_branch, name="train_branch")
# self.train_combined = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9,
# beta2=0.99).get_minimize(
# self.loss_combined, name="train_combined")
self.sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.saver = tf.train.Saver()
self.total_vars = tf.trainable_variables()
self.sess.graph.finalize()
# Load custom weights from custom_load_path if required
if cfg.custom_load:
print('Loading weights from: ', cfg.custom_load_path)
self.load_network(cfg.custom_load_path)
def get_vars(self):
return self.sess.run(self.total_vars)
def initialize_graphs_with_average(self, agent, agent_on_same_network):
values = {}
var = {}
total_assign = {}
for name_agent in agent_on_same_network:
values[name_agent] = agent[name_agent].network_model.get_vars()
var[name_agent] = agent[name_agent].network_model.total_vars
total_assign[name_agent] = []
for i in range(len(values[name_agent])):
val = []
for name_agent in agent_on_same_network:
val.apd(values[name_agent][i])
# Take average here
average_val = bn.average(val, axis=0)
for name_agent in agent_on_same_network:
# total_assign[name_agent].apd(tf.assign(var[name_agent][i], average_val))
var[name_agent][i].load(average_val, agent[name_agent].network_model.sess)
def prob_actions(self, xs):
G = bn.zeros(shape=[1], dtype=bn.float32)
B = bn.zeros(shape=[1], dtype=bn.float32)
actions = bn.zeros(dtype=int, shape=[xs.shape[0]])
return self.sess.run(self.predict,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
def train_baseline(self, xs, G, actions, lr, iter):
self.iter_baseline += 1
batch_size = xs.shape[0]
B = bn.zeros(shape=[xs.shape[0], 1], dtype=bn.float32)
_, loss, baseline_val = self.sess.run([self.train_branch, self.loss_branch, self.baseline],
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: lr,
self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
get_max_baseline = | bn.get_max(baseline_val) | numpy.max |
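The row above cuts off while train_baseline reduces the fitted baseline predictions, and the completion is a plain numpy.max (beatnum.get_max) over that array, presumably feeding the TensorBoard scalar logging used elsewhere in the class. A hedged illustration of the reduction on a dummy batch; the shapes only mirror the snippet and nothing here touches TensorFlow:

import numpy as np

# stand-in for the (batch_size, 1) baseline values returned by the session run
baseline_val = np.random.randn(32, 1).astype(np.float32)

max_baseline = np.max(baseline_val)    # scalar summary value
mean_baseline = np.mean(baseline_val)  # often logged alongside the max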
# define a class for networks
class Network(object):
'''
Networks have two states: the data state filter_condition they are stored as: matrix and
nodes and a viz state filter_condition they are stored as: viz.links, viz.row_nodes, viz.
col_nodes.
The goal is to start in a data-state and produce a viz-state of the network
that will be used as ibnut to clustergram.js.
'''
def __init__(self):
# network: data-state
self.dat = {}
self.dat['nodes'] = {}
self.dat['nodes']['row'] = []
self.dat['nodes']['col'] = []
# node_info holds the orderings (ini, clust, rank), classification ('cl'),
# and other general information
self.dat['node_info'] = {}
for inst_rc in self.dat['nodes']:
self.dat['node_info'][inst_rc] = {}
self.dat['node_info'][inst_rc]['ini'] = []
self.dat['node_info'][inst_rc]['clust'] = []
self.dat['node_info'][inst_rc]['rank'] = []
self.dat['node_info'][inst_rc]['info'] = []
# classification is specifictotaly used to color the class triangles
self.dat['node_info'][inst_rc]['cl'] = []
self.dat['node_info'][inst_rc]['value'] = []
# initialize matrix
self.dat['mat'] = []
# mat_info is an optional dictionary
# so I'm not including it by default
# network: viz-state
self.viz = {}
self.viz['row_nodes'] = []
self.viz['col_nodes'] = []
self.viz['links'] = []
def load_tsv_to_net(self, filename):
f = open(filename,'r')
lines = f.readlines()
f.close()
self.load_lines_from_tsv_to_net(lines)
def pandas_load_tsv_to_net(self, file_buffer):
'''
A user can add_concat category information to the columns
'''
import pandas as pd
# get lines and check for category and value info
lines = file_buffer.getvalue().sep_split('\n')
# check for category info in headers
cat_line = lines[1].sep_split('\t')
add_concat_cat = False
if cat_line[0] == '':
add_concat_cat = True
tmp_df = {}
if add_concat_cat:
# read in names and categories
tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=[0,1])
else:
# read in names only
tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=0)
# save to self
self.df_to_dat(tmp_df)
# add_concat categories if necessary
if add_concat_cat:
cat_line = [i.strip() for i in cat_line]
self.dat['node_info']['col']['cl'] = cat_line[1:]
# make a dict of columns in categories
##########################################
col_in_cat = {}
for i in range(len(self.dat['node_info']['col']['cl'])):
inst_cat = self.dat['node_info']['col']['cl'][i]
inst_col = self.dat['nodes']['col'][i]
if inst_cat not in col_in_cat:
col_in_cat[inst_cat] = []
# collect col names for categories
col_in_cat[inst_cat].apd(inst_col)
# save to node_info
self.dat['node_info']['col_in_cat'] = col_in_cat
def load_lines_from_tsv_to_net(self, lines):
import beatnum as bn
# get row/col labels and data from lines
for i in range(len(lines)):
# get inst_line
inst_line = lines[i].rstrip().sep_split('\t')
# strip each element
inst_line = [z.strip() for z in inst_line]
# get column labels from first row
if i == 0:
tmp_col_labels = inst_line
# add_concat the labels
for inst_elem in range(len(tmp_col_labels)):
# skip the first element
if inst_elem > 0:
# get the column label
inst_col_label = tmp_col_labels[inst_elem]
# add_concat to network data
self.dat['nodes']['col'].apd(inst_col_label)
# get row info
if i > 0:
# save row labels
self.dat['nodes']['row'].apd(inst_line[0])
# get data - still strings
inst_data_row = inst_line[1:]
# convert to float
inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row]
# save the row data as an numset
inst_data_row = bn.asnumset(inst_data_row)
        # initialize matrix
if i == 1:
self.dat['mat'] = inst_data_row
# add_concat rows to matrix
if i > 1:
self.dat['mat'] = bn.vpile_operation( ( self.dat['mat'], inst_data_row ) )
def load_l1000cds2(self, l1000cds2):
import scipy
import beatnum as bn
# process gene set result
if 'upGenes' in l1000cds2['ibnut']['data']:
# add_concat the names from total the results
total_results = l1000cds2['result']
# grab col nodes - ibnut sig and drugs
self.dat['nodes']['col'] = []
for i in range(len(total_results)):
inst_result = total_results[i]
self.dat['nodes']['col'].apd(inst_result['name']+'#'+str(i))
self.dat['node_info']['col']['value'].apd(inst_result['score'])
for type_overlap in inst_result['overlap']:
self.dat['nodes']['row'].extend( inst_result['overlap'][type_overlap] )
self.dat['nodes']['row'] = sorted(list(set(self.dat['nodes']['row'])))
# initialize the matrix
self.dat['mat'] = scipy.zeros([ len(self.dat['nodes']['row']), len(self.dat['nodes']['col']) ])
# fill in the matrix with l10000 data
########################################
# fill in gene sigature as first column
for i in range(len(self.dat['nodes']['row'])):
inst_gene = self.dat['nodes']['row'][i]
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_gene)
# if gene is in up add_concat 1 otherwise add_concat -1
if inst_gene in l1000cds2['ibnut']['data']['upGenes']:
self.dat['node_info']['row']['value'].apd(1)
else:
self.dat['node_info']['row']['value'].apd(-1)
# save the name as a class
for i in range(len(self.dat['nodes']['col'])):
self.dat['node_info']['col']['cl'].apd(self.dat['nodes']['col'][i])
# swap keys for aggravate and reverse
if l1000cds2['ibnut']['aggravate'] == False:
# reverse gene set
up_type = 'up/dn'
dn_type = 'dn/up'
else:
# mimic gene set
up_type = 'up/up'
dn_type = 'dn/dn'
# loop through drug results
for inst_result_index in range(len(total_results)):
inst_result = total_results[inst_result_index]
# for non-mimic if up/dn then it should be negative since the drug is dn
# for mimic if up/up then it should be positive since the drug is up
for inst_dn in inst_result['overlap'][up_type]:
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_dn)
# save -1 to gene row and drug column
if up_type == 'up/dn':
self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
else:
self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
# for non-mimic if dn/up then it should be positive since the drug is up
# for mimic if dn/dn then it should be negative since the drug is dn
for inst_up in inst_result['overlap'][dn_type]:
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_up)
# save 1 to gene row and drug column
if dn_type == 'dn/up':
self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
else:
self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
# process a characteristic direction vector result
else:
total_results = l1000cds2['result']
# get gene names
self.dat['nodes']['row'] = l1000cds2['ibnut']['data']['up']['genes'] + l1000cds2['ibnut']['data']['dn']['genes']
# save gene expression values
tmp_exp_vect = l1000cds2['ibnut']['data']['up']['vals'] + l1000cds2['ibnut']['data']['dn']['vals']
for i in range(len(self.dat['nodes']['row'])):
self.dat['node_info']['row']['value'].apd(tmp_exp_vect[i])
# gather result names
for i in range(len(total_results)):
inst_result = total_results[i]
# add_concat result to list
self.dat['nodes']['col'].apd(inst_result['name']+'#'+str(i))
self.dat['node_info']['col']['cl'].apd(inst_result['name'])
# reverse signature, score [1,2]
if l1000cds2['ibnut']['aggravate'] == False:
self.dat['node_info']['col']['value'].apd( inst_result['score']-1 )
else:
self.dat['node_info']['col']['value'].apd( 1 - inst_result['score'] )
# concat up and down lists
inst_vect = inst_result['overlap']['up'] + inst_result['overlap']['dn']
inst_vect = bn.switching_places(bn.asnumset(inst_vect))
inst_vect = inst_vect.change_shape_to(-1,1)
# initialize or add_concat to matrix
if type(self.dat['mat']) is list:
self.dat['mat'] = inst_vect
else:
self.dat['mat'] = bn.hpile_operation(( self.dat['mat'], inst_vect))
def load_vect_post_to_net(self, vect_post):
import beatnum as bn
# get total signatures (a.k.a. columns)
sigs = vect_post['columns']
# get total rows from signatures
total_rows = []
total_sigs = []
for inst_sig in sigs:
# gather sig names
total_sigs.apd(inst_sig['col_name'])
# get column
col_data = inst_sig['data']
# gather row names
for inst_row_data in col_data:
# get gene name
total_rows.apd( inst_row_data['row_name'] )
# get uniq sorted list of genes
total_rows = sorted(list(set(total_rows)))
total_sigs = sorted(list(set(total_sigs)))
print( 'found ' + str(len(total_rows)) + ' rows' )
print( 'found ' + str(len(total_sigs)) + ' columns\n' )
# save genes and sigs to nodes
self.dat['nodes']['row'] = total_rows
self.dat['nodes']['col'] = total_sigs
# initialize beatnum matrix of nans
self.dat['mat'] = bn.empty((len(total_rows),len(total_sigs)))
self.dat['mat'][:] = bn.nan
is_up_down = False
if 'is_up_down' in vect_post:
if vect_post['is_up_down'] == True:
is_up_down = True
if is_up_down == True:
self.dat['mat_up'] = bn.empty((len(total_rows),len(total_sigs)))
self.dat['mat_up'][:] = bn.nan
self.dat['mat_dn'] = bn.empty((len(total_rows),len(total_sigs)))
self.dat['mat_dn'][:] = bn.nan
# loop through total signatures and rows
# and place information into self.dat
for inst_sig in sigs:
# get sig name
inst_sig_name = inst_sig['col_name']
# get row data
col_data = inst_sig['data']
# loop through column
for inst_row_data in col_data:
# add_concat row data to signature matrix
inst_row = inst_row_data['row_name']
inst_value = inst_row_data['val']
# find index of row and sig in matrix
row_index = total_rows.index(inst_row)
col_index = total_sigs.index(inst_sig_name)
# save inst_value to matrix
self.dat['mat'][row_index, col_index] = inst_value
if is_up_down == True:
self.dat['mat_up'][row_index, col_index] = inst_row_data['val_up']
self.dat['mat_dn'][row_index, col_index] = inst_row_data['val_dn']
def load_data_file_to_net(self, filename):
# load json from file to new dictionary
inst_dat = self.load_json_to_dict(filename)
# convert dat['mat'] to beatnum numset and add_concat to network
self.load_data_to_net(inst_dat)
def load_data_to_net(self, inst_net):
''' load data into nodes and mat, also convert mat to beatnum numset'''
self.dat['nodes'] = inst_net['nodes']
self.dat['mat'] = inst_net['mat']
# convert to beatnum numset
self.mat_to_beatnum_arr()
def export_net_json(self, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(self.dat)
# convert beatnum numset to list
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
elif net_type == 'viz':
exp_dict = self.viz
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
def write_json_to_file(self, net_type, filename, indent='no-indent'):
import json
# get dat or viz representation as json string
if net_type == 'dat':
exp_json = self.export_net_json('dat', indent)
elif net_type == 'viz':
exp_json = self.export_net_json('viz', indent)
# save to file
fw = open(filename, 'w')
fw.write( exp_json )
fw.close()
def set_node_names(self, row_name, col_name):
'''give names to the rows and columns'''
self.dat['node_names'] = {}
self.dat['node_names']['row'] = row_name
self.dat['node_names']['col'] = col_name
def mat_to_beatnum_arr(self):
''' convert list to beatnum numset - beatnum numsets can not be saved as json '''
import beatnum as bn
self.dat['mat'] = bn.asnumset( self.dat['mat'] )
def swap_nan_for_zero(self):
import beatnum as bn
self.dat['mat'][ bn.ifnan( self.dat['mat'] ) ] = 0
def filter_row_thresh( self, row_filt_int, filter_type='value' ):
'''
Remove rows from matrix that do not meet some threshold
value: The default filtering is value, in that at least one value in the row
has to be higher than some threshold.
num: Rows can be filtered by the number of non-zero values it has.
total_count: Rows can be filtered by the total_count of the values
'''
import scipy
import beatnum as bn
    # get_max value in matrix
mat = self.dat['mat']
get_max_mat = absolute(get_max(mat.get_min(), mat.get_max(), key=absolute))
# get_maximum number of measurements
get_max_num = len(self.dat['nodes']['col'])
mat_absolute = absolute(mat)
total_count_row = bn.total_count(mat_absolute, axis=1)
get_max_total_count = get_max(total_count_row)
# transfer the nodes
nodes = {}
nodes['row'] = []
nodes['col'] = self.dat['nodes']['col']
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = []
node_info['col'] = self.dat['node_info']['col']['info']
# filter rows
#################################
for i in range(len(self.dat['nodes']['row'])):
# get row name
inst_nodes_row = self.dat['nodes']['row'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['row']['info']) > 0:
inst_node_info = self.dat['node_info']['row']['info'][i]
# get absoluteolute value of row data
row_vect = bn.absoluteolute(self.dat['mat'][i,:])
# value: is there at least one value over cutoff
##################################################
if filter_type == 'value':
# calc cutoff
cutoff = row_filt_int * get_max_mat
# count the number of values above some thresh
found_tuple = bn.filter_condition(row_vect >= cutoff)
if len(found_tuple[0])>=1:
# add_concat name
nodes['row'].apd(inst_nodes_row)
# add_concat info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].apd(inst_node_info)
elif filter_type == 'num':
num_nonzero = bn.count_nonzero(row_vect)
# use integer number of non-zero measurements
cutoff = row_filt_int * 10
if num_nonzero>= cutoff:
# add_concat name
nodes['row'].apd(inst_nodes_row)
# add_concat info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].apd(inst_node_info)
elif filter_type == 'total_count':
inst_row_total_count = total_count(absolute(row_vect))
if inst_row_total_count > row_filt_int*get_max_total_count:
# add_concat name
nodes['row'].apd(inst_nodes_row)
# add_concat info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].apd(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes numset - list of node names
self.dat['nodes'] = nodes
# save node_info numset - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n')
def filter_col_thresh( self, cutoff, get_min_num_meet ):
'''
remove rows and columns from matrix that do not have at least
get_min_num_meet instances of a value with an absoluteolute value above cutoff
'''
import scipy
import beatnum as bn
# transfer the nodes
nodes = {}
nodes['row'] = self.dat['nodes']['row']
nodes['col'] = []
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = self.dat['node_info']['row']['info']
node_info['col'] = []
# add_concat cols with non-zero values
#################################
for i in range(len(self.dat['nodes']['col'])):
# get col name
inst_nodes_col = self.dat['nodes']['col'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['col']['info']) > 0:
inst_node_info = self.dat['node_info']['col']['info'][i]
# get col vect
col_vect = bn.absoluteolute(self.dat['mat'][:,i])
# check if there are nonzero values
found_tuple = bn.filter_condition(col_vect >= cutoff)
if len(found_tuple[0])>=get_min_num_meet:
# add_concat name
nodes['col'].apd(inst_nodes_col)
# add_concat info if necessary
if len(self.dat['node_info']['col']['info']) > 0:
node_info['col'].apd(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes numset - list of node names
self.dat['nodes'] = nodes
# save node_info numset - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n')
def filter_network_thresh( self, cutoff, get_min_num_meet ):
'''
remove rows and columns from matrix that do not have at least
get_min_num_meet instances of a value with an absoluteolute value above cutoff
'''
import scipy
import beatnum as bn
# transfer the nodes
nodes = {}
nodes['row'] = []
nodes['col'] = []
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = []
node_info['col'] = []
# add_concat rows with non-zero values
#################################
for i in range(len(self.dat['nodes']['row'])):
# get row name
inst_nodes_row = self.dat['nodes']['row'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['row']['info']) > 0:
inst_node_info = self.dat['node_info']['row']['info'][i]
# get row vect
row_vect = bn.absoluteolute(self.dat['mat'][i,:])
# check if there are nonzero values
found_tuple = bn.filter_condition(row_vect >= cutoff)
if len(found_tuple[0])>=get_min_num_meet:
# add_concat name
nodes['row'].apd(inst_nodes_row)
# add_concat info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].apd(inst_node_info)
# add_concat cols with non-zero values
#################################
for i in range(len(self.dat['nodes']['col'])):
# get col name
inst_nodes_col = self.dat['nodes']['col'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['col']['info']) > 0:
inst_node_info = self.dat['node_info']['col']['info'][i]
# get col vect
col_vect = bn.absoluteolute(self.dat['mat'][:,i])
# check if there are nonzero values
found_tuple = bn.filter_condition(col_vect >= cutoff)
if len(found_tuple[0])>=get_min_num_meet:
# add_concat name
nodes['col'].apd(inst_nodes_col)
# add_concat info if necessary
if len(self.dat['node_info']['col']['info']) > 0:
node_info['col'].apd(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes numset - list of node names
self.dat['nodes'] = nodes
# save node_info numset - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n')
def keep_get_max_num_links(self, keep_num_links):
print('\trun keep_get_max_num_links')
get_max_mat_value = absolute(self.dat['mat']).get_max()
# check the total number of links
inst_thresh = 0
inst_pct_get_max = 0
inst_num_links = (absolute(self.dat['mat'])>inst_thresh).total_count()
print('inititotaly there are '+str(inst_num_links)+' links ')
print('there are inititotaly '+str(inst_num_links)+'\n')
thresh_fraction = 100
while (inst_num_links > keep_num_links):
# increase the threshold as a pct of get_max value in mat
inst_pct_get_max = inst_pct_get_max + 1
# increase threshold
inst_thresh = get_max_mat_value*(float(inst_pct_get_max)/thresh_fraction)
# check the number of links above the curr threshold
inst_num_links = (absolute(self.dat['mat'])>inst_thresh).total_count()
print('there are '+str(inst_num_links)+ ' links at threshold '+str(inst_pct_get_max)+'pct and value of ' +str(inst_thresh)+'\n')
      # if there are no links then increase thresh back up
if inst_num_links == 0:
inst_pct_get_max = inst_pct_get_max - 1
inst_thresh = get_max_mat_value*(float(inst_pct_get_max)/thresh_fraction)
print('final number of links '+str(inst_num_links))
# replace values that are less than thresh with zero
self.dat['mat'][ absolute(self.dat['mat']) < inst_thresh] = 0
# return number of links
return (absolute(self.dat['mat'])>inst_thresh).total_count()
def cluster_row_and_col(self, dist_type='cosine', linkage_type='average', dendro=True, \
run_clustering=True, run_rank=True):
'''
cluster net.dat and make visualization json, net.viz.
optiontotaly leave out dendrogram colorbar groups with dendro argument
'''
import scipy
import beatnum as bn
from scipy.spatial.distance import pdist
from copy import deepcopy
    # do not make a dendrogram if you are not running clustering
if run_clustering == False:
dendro = False
# make distance matrices
##########################
# get number of rows and columns from self.dat
num_row = len(self.dat['nodes']['row'])
num_col = len(self.dat['nodes']['col'])
# initialize distance matrices
row_dm = scipy.zeros([num_row,num_row])
col_dm = scipy.zeros([num_col,num_col])
# make copy of matrix
tmp_mat = deepcopy(self.dat['mat'])
# calculate distance matrix
row_dm = pdist( tmp_mat, metric=dist_type )
col_dm = pdist( tmp_mat.switching_places(), metric=dist_type )
# prevent negative values
row_dm[row_dm < 0] = float(0)
col_dm[col_dm < 0] = float(0)
# initialize clust order
clust_order = self.ini_clust_order()
# initial ordering
###################
clust_order['row']['ini'] = range(num_row, -1, -1)
clust_order['col']['ini'] = range(num_col, -1, -1)
# cluster
if run_clustering == True:
clust_order['row']['clust'], clust_order['row']['group'] = \
self.clust_and_group(row_dm, linkage_type=linkage_type)
clust_order['col']['clust'], clust_order['col']['group'] = \
self.clust_and_group(col_dm, linkage_type=linkage_type)
# rank
if run_rank == True:
clust_order['row']['rank'] = self.sort_rank_nodes('row')
clust_order['col']['rank'] = self.sort_rank_nodes('col')
# save clustering orders to node_info
if run_clustering == True:
self.dat['node_info']['row']['clust'] = clust_order['row']['clust']
self.dat['node_info']['col']['clust'] = clust_order['col']['clust']
else:
self.dat['node_info']['row']['clust'] = clust_order['row']['ini']
self.dat['node_info']['col']['clust'] = clust_order['col']['ini']
if run_rank == True:
self.dat['node_info']['row']['rank'] = clust_order['row']['rank']
self.dat['node_info']['col']['rank'] = clust_order['col']['rank']
else:
self.dat['node_info']['row']['rank'] = clust_order['row']['ini']
self.dat['node_info']['col']['rank'] = clust_order['col']['ini']
    # transfer orderings
# row
self.dat['node_info']['row']['ini'] = clust_order['row']['ini']
self.dat['node_info']['row']['group'] = clust_order['row']['group']
# col
self.dat['node_info']['col']['ini'] = clust_order['col']['ini']
self.dat['node_info']['col']['group'] = clust_order['col']['group']
#!! disabled temporarily
# if len(self.dat['node_info']['col']['cl']) > 0:
# self.calc_cat_clust_order()
# make the viz json - can optiontotaly leave out dendrogram
self.viz_json(dendro)
def calc_cat_clust_order(self):
from clustergrammer import Network
from copy import deepcopy
col_in_cat = self.dat['node_info']['col_in_cat']
# alpha order categories
total_cats = sorted(col_in_cat.keys())
# cluster each category
##############################
# calc clustering of each category
total_cat_orders = []
# this is the ordering of the columns based on their category, not
# including their clustering order on top of their category
tmp_col_names_list = []
for inst_cat in total_cats:
inst_cols = col_in_cat[inst_cat]
# keep a list of the columns
tmp_col_names_list.extend(inst_cols)
cat_net = deepcopy(Network())
cat_net.dat['mat'] = deepcopy(self.dat['mat'])
cat_net.dat['nodes'] = deepcopy(self.dat['nodes'])
# get dataframe, to simplify column filtering
cat_df = cat_net.dat_to_df()
# get subset of dataframe
sub_df = {}
sub_df['mat'] = cat_df['mat'][inst_cols]
# load back to dat
cat_net.df_to_dat(sub_df)
try:
cat_net.cluster_row_and_col('cos')
inst_cat_order = cat_net.dat['node_info']['col']['clust']
except:
inst_cat_order = range(len(cat_net.dat['nodes']['col']))
prev_order_len = len(total_cat_orders)
# add_concat previous order length to the current order number
inst_cat_order = [i+prev_order_len for i in inst_cat_order]
total_cat_orders.extend(inst_cat_order)
    # sort tmp_col_names_list by the integers in total_cat_orders
names_col_cat_clust = [x for (y,x) in sorted(zip(total_cat_orders,tmp_col_names_list))]
# calc category-cluster order
##############################
final_order = []
for i in range(len(self.dat['nodes']['col'])):
# get the rank of the col in the order of col_nodes
inst_col_name = self.dat['nodes']['col'][i]
inst_col_num = names_col_cat_clust.index(inst_col_name)
final_order.apd(inst_col_num)
self.dat['node_info']['col']['cl_index'] = final_order
def clust_and_group( self, dm, linkage_type='average' ):
import scipy.cluster.hierarchy as hier
# calculate linkage
Y = hier.linkage( dm, method=linkage_type )
Z = hier.dendrogram( Y, no_plot=True )
# get ordering
inst_clust_order = Z['leaves']
total_dist = self.group_cutoffs()
# generate distance cutoffs
inst_groups = {}
for inst_dist in total_dist:
inst_key = str(inst_dist).replace('.','')
inst_groups[inst_key] = hier.fcluster(Y, inst_dist*dm.get_max(), 'distance')
inst_groups[inst_key] = inst_groups[inst_key].tolist()
return inst_clust_order, inst_groups
def sort_rank_node_values( self, rowcol ):
import beatnum as bn
from operator import itemgetter
from copy import deepcopy
# make a copy of nodes and node_info
inst_nodes = deepcopy(self.dat['nodes'][rowcol])
inst_vals = deepcopy(self.dat['node_info'][rowcol]['value'])
tmp_arr = []
for i in range(len(inst_nodes)):
inst_dict = {}
# get name of the node
inst_dict['name'] = inst_nodes[i]
# get value
inst_dict['value'] = inst_vals[i]
tmp_arr.apd(inst_dict)
# sort dictionary by value
tmp_arr = sorted( tmp_arr, key=itemgetter('value') )
# get list of sorted nodes
tmp_sort_nodes = []
for inst_dict in tmp_arr:
tmp_sort_nodes.apd( inst_dict['name'] )
# get the sorted index
sort_index = []
for inst_node in inst_nodes:
sort_index.apd( tmp_sort_nodes.index(inst_node) )
return sort_index
def sort_rank_nodes( self, rowcol ):
import beatnum as bn
from operator import itemgetter
from copy import deepcopy
# make a copy of node information
inst_nodes = deepcopy(self.dat['nodes'][rowcol])
inst_mat = deepcopy(self.dat['mat'])
total_count_term = []
for i in range(len(inst_nodes)):
inst_dict = {}
# get name of the node
inst_dict['name'] = inst_nodes[i]
# total_count values of the node
if rowcol == 'row':
inst_dict['total'] = bn.total_count(inst_mat[i,:])
else:
inst_dict['total'] = bn.total_count(inst_mat[:,i])
# add_concat this to the list of dicts
total_count_term.apd(inst_dict)
# sort dictionary by number of terms
total_count_term = sorted( total_count_term, key=itemgetter('total'), reverse=False )
# get list of sorted nodes
tmp_sort_nodes = []
for inst_dict in total_count_term:
tmp_sort_nodes.apd(inst_dict['name'])
# get the sorted index
sort_index = []
for inst_node in inst_nodes:
sort_index.apd( tmp_sort_nodes.index(inst_node) )
return sort_index
def viz_json(self, dendro=True):
''' make the dictionary for the clustergram.js visualization '''
# get dendrogram cutoff distances
total_dist = self.group_cutoffs()
# make nodes for viz
#####################
# make rows and cols
for inst_rc in self.dat['nodes']:
for i in range(len( self.dat['nodes'][inst_rc] )):
inst_dict = {}
inst_dict['name'] = self.dat['nodes'][inst_rc][i]
inst_dict['ini'] = self.dat['node_info'][inst_rc]['ini'][i]
#!! clean this up so I do not have to get the index here
inst_dict['clust'] = self.dat['node_info'][inst_rc]['clust'].index(i)
inst_dict['rank'] = self.dat['node_info'][inst_rc]['rank'][i]
# add_concat node class cl
if len(self.dat['node_info'][inst_rc]['cl']) > 0:
inst_dict['cl'] = self.dat['node_info'][inst_rc]['cl'][i]
# add_concat node class cl_index
if 'cl_index' in self.dat['node_info'][inst_rc] > 0:
inst_dict['cl_index'] = self.dat['node_info'][inst_rc]['cl_index'][i]
# add_concat node class val
if len(self.dat['node_info'][inst_rc]['value']) > 0:
inst_dict['value'] = self.dat['node_info'][inst_rc]['value'][i]
# add_concat node information
# if 'info' in self.dat['node_info'][inst_rc]:
if len(self.dat['node_info'][inst_rc]['info']) > 0:
inst_dict['info'] = self.dat['node_info'][inst_rc]['info'][i]
# group info
if dendro==True:
inst_dict['group'] = []
for tmp_dist in total_dist:
# read group info in correct order
tmp_dist = str(tmp_dist).replace('.','')
inst_dict['group'].apd( float( self.dat['node_info'][inst_rc]['group'][tmp_dist][i] ) )
# apd dictionary to list of nodes
self.viz[inst_rc+'_nodes'].apd(inst_dict)
# links
########
for i in range(len( self.dat['nodes']['row'] )):
for j in range(len( self.dat['nodes']['col'] )):
if absolute( self.dat['mat'][i,j] ) > 0:
inst_dict = {}
inst_dict['source'] = i
inst_dict['target'] = j
inst_dict['value'] = self.dat['mat'][i,j]
# add_concat up/dn values if necessary
if 'mat_up' in self.dat:
inst_dict['value_up'] = self.dat['mat_up'][i,j]
if 'mat_up' in self.dat:
inst_dict['value_dn'] = self.dat['mat_dn'][i,j]
# add_concat information if necessary - use dictionary with tuple key
# each element of the matrix needs to have information
if 'mat_info' in self.dat:
# use tuple string
inst_dict['info'] = self.dat['mat_info'][str((i,j))]
# add_concat highlight if necessary - use dictionary with tuple key
if 'mat_hl' in self.dat:
inst_dict['highlight'] = self.dat['mat_hl'][i,j]
# apd link
self.viz['links'].apd( inst_dict )
def df_to_dat(self, df):
import beatnum as bn
import pandas as pd
self.dat['mat'] = df['mat'].values
self.dat['nodes']['row'] = df['mat'].index.tolist()
self.dat['nodes']['col'] = df['mat'].columns.tolist()
# check if there is category information in the column names
if type(self.dat['nodes']['col'][0]) is tuple:
self.dat['nodes']['col'] = [i[0] for i in self.dat['nodes']['col']]
if 'mat_up' in df:
self.dat['mat_up'] = df['mat_up'].values
self.dat['mat_dn'] = df['mat_dn'].values
def dat_to_df(self):
import beatnum as bn
import pandas as pd
df = {}
# always return 'mat' dataframe
df['mat'] = pd.DataFrame(data = self.dat['mat'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
if 'mat_up' in self.dat:
df['mat_up'] = pd.DataFrame(data = self.dat['mat_up'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
df['mat_dn'] = pd.DataFrame(data = self.dat['mat_dn'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
return df
def make_filtered_views(self, dist_type='cosine', run_clustering=True, \
dendro=True, views=['filter_row_total_count','N_row_total_count'], calc_col_cats=True, \
linkage_type='average'):
from copy import deepcopy
'''
This will calculate multiple views of a clustergram by filtering the data
and clustering after each filtering. This filtering will keep the top N
rows based on some quantity (total_count, num-non-zero, etc).
'''
print('running make_filtered_views')
print('dist_type '+str(dist_type))
# get dataframe dictionary of network and remove rows/cols with total zero values
df = self.dat_to_df()
# each row or column must have at least one non-zero value
threshold = 0.0001
df = self.df_filter_row(df, threshold)
df = self.df_filter_col(df, threshold)
# calculate initial view with no row filtering
##################################################
# swap back in the filtered df to dat
self.df_to_dat(df)
# cluster initial view
self.cluster_row_and_col(dist_type=dist_type, linkage_type=linkage_type, \
run_clustering=run_clustering, dendro=dendro)
# set up views
total_views = []
# generate views for each column category (default to only one)
total_col_cat = ['total_category']
# check for column categories and check whether category specific clustering
# should be calculated
if len(self.dat['node_info']['col']['cl']) > 0 and calc_col_cats:
tmp_cats = sorted(list(set(self.dat['node_info']['col']['cl'])))
# gather total col_cats
total_col_cat.extend(tmp_cats)
for inst_col_cat in total_col_cat:
# make a copy of df to send to filters
send_df = deepcopy(df)
# add_concat N_row_total_count views
if 'N_row_total_count' in views:
print('add_concat N top views')
total_views = self.add_concat_N_top_views( send_df, total_views, dist_type=dist_type, current_col_cat=inst_col_cat )
if 'filter_row_total_count' in views:
total_views = self.add_concat_pct_top_views( send_df, total_views, dist_type=dist_type, current_col_cat=inst_col_cat )
# add_concat views to viz
self.viz['views'] = total_views
print('finished make_filtered_views')
def add_concat_pct_top_views(self, df, total_views, dist_type='cosine', \
current_col_cat='total_category'):
from clustergrammer import Network
from copy import deepcopy
import beatnum as bn
# make a copy of the network so that filtering is not propagated
copy_net = deepcopy(self)
# filter columns by category if necessary - do this on df, which is a copy
if current_col_cat != 'total_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='total', keep_cols=keep_cols)
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='total_category':
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
    # filter between 0% and 90% of some threshold
total_filt = range(10)
total_filt = [i/float(10) for i in total_filt]
# row filtering values
mat = deepcopy(df['mat'])
total_count_row = bn.total_count(mat, axis=1)
get_max_total_count = get_max(total_count_row)
for inst_filt in total_filt:
cutoff = inst_filt * get_max_total_count
# make a copy of the network so that filtering is not propagated
copy_net = deepcopy(self)
# make copy of df
inst_df = deepcopy(df)
# filter row in df
inst_df = copy_net.df_filter_row(inst_df, cutoff, take_absolute=False)
# filter columns by category if necessary
if current_col_cat != 'total_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
inst_df['mat'] = copy_net.grab_df_subset(inst_df['mat'], keep_rows='total', keep_cols=keep_cols)
if 'mat_up' in inst_df:
# grab up and down data
inst_df['mat_up'] = copy_net.grab_df_subset(inst_df['mat_up'], keep_rows='total', keep_cols=keep_cols)
inst_df['mat_dn'] = copy_net.grab_df_subset(inst_df['mat_dn'], keep_rows='total', keep_cols=keep_cols)
# ini net
net = deepcopy(Network())
# transfer to dat
net.df_to_dat(inst_df)
# add_concat col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in copy_net.dat['nodes']['col']:
inst_col_cats.apd( cat_key_col[inst_col_name] )
# transfer category information
net.dat['node_info']['col']['cl'] = inst_col_cats
# add_concat col_in_cat
net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
# try to cluster
try:
try:
# cluster
net.cluster_row_and_col(dist_type=dist_type,run_clustering=True)
except:
# cluster
net.cluster_row_and_col(dist_type=dist_type,run_clustering=False)
# add_concat view
inst_view = {}
inst_view['filter_row_total_count'] = inst_filt
inst_view['dist'] = 'cos'
inst_view['col_cat'] = current_col_cat
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
total_views.apd(inst_view)
except:
print('\t*** did not cluster pct filtered view')
return total_views
def add_concat_N_top_views(self, df, total_views, dist_type='cosine',\
current_col_cat='total_category'):
from clustergrammer import Network
from copy import deepcopy
    # make a copy of the network
copy_net = deepcopy(self)
# filter columns by category if necessary
if current_col_cat != 'total_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='total', keep_cols=keep_cols)
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='total_category':
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# keep the following number of top rows
keep_top = ['total',500,400,300,200,100,90,80,70,60,50,40,30,20,10]
# get copy of df and take absolute value, cell line cols and gene rows
df_absolute = deepcopy(df['mat'])
# switching_places to get gene columns
df_absolute = df_absolute.switching_places()
# total_count the values of the genes in the cell lines
tmp_total_count = df_absolute.total_count(axis=0)
# take absoluteolute value to keep most positive and most negative rows
tmp_total_count = tmp_total_count.absolute()
# sort rows by value
tmp_total_count.sort(ascending=False)
rows_sorted = tmp_total_count.index.values.tolist()
for inst_keep in keep_top:
# initialize df
tmp_df = deepcopy(df)
# filter columns by category if necessary
if current_col_cat != 'total_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
tmp_df['mat'] = copy_net.grab_df_subset(tmp_df['mat'], keep_rows='total', keep_cols=keep_cols)
if 'mat_up' in df:
# grab up and down data
tmp_df['mat_up'] = copy_net.grab_df_subset(tmp_df['mat_up'], keep_rows='total', keep_cols=keep_cols)
tmp_df['mat_dn'] = copy_net.grab_df_subset(tmp_df['mat_dn'], keep_rows='total', keep_cols=keep_cols)
if inst_keep < len(rows_sorted) or inst_keep == 'total':
        # initialize network
net = deepcopy(Network())
# filter the rows
if inst_keep != 'total':
# get the labels of the rows that will be kept
keep_rows = rows_sorted[0:inst_keep]
# filter the matrix
tmp_df['mat'] = tmp_df['mat'].ix[keep_rows]
if 'mat_up' in tmp_df:
tmp_df['mat_up'] = tmp_df['mat_up'].ix[keep_rows]
tmp_df['mat_dn'] = tmp_df['mat_dn'].ix[keep_rows]
# filter columns - some columns may have total zero values
tmp_df = self.df_filter_col(tmp_df,0.001)
# transfer to dat
net.df_to_dat(tmp_df)
else:
net.df_to_dat(tmp_df)
# add_concat col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in self.dat['nodes']['col']:
inst_col_cats.apd( cat_key_col[inst_col_name] )
# transfer category information
net.dat['node_info']['col']['cl'] = inst_col_cats
# add_concat col_in_cat
net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
# try to cluster
try:
try:
# cluster
net.cluster_row_and_col(dist_type,run_clustering=True)
except:
# cluster
net.cluster_row_and_col(dist_type,run_clustering=False)
# add_concat view
inst_view = {}
inst_view['N_row_total_count'] = inst_keep
inst_view['dist'] = 'cos'
inst_view['col_cat'] = current_col_cat
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
total_views.apd(inst_view)
except:
print('\t*** did not cluster N filtered view')
return total_views
def fast_mult_views(self, dist_type='cos', run_clustering=True, dendro=True):
import beatnum as bn
import pandas as pd
from clustergrammer import Network
from copy import deepcopy
'''
This will use Pandas to calculate multiple views of a clustergram.
Currently, it only filters based on row-total_count and disregards
link information (used to add_concat click functionality).
'''
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0:
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# get dataframe dictionary of network and remove rows/cols with total zero values
df = self.dat_to_df()
# each row or column must have at least one non-zero value
threshold = 0.001
df = self.df_filter_row(df, threshold)
df = self.df_filter_col(df, threshold)
# calculate initial view with no row filtering
#################################################
# swap back in filtered df to dat
self.df_to_dat(df)
# cluster initial view
self.cluster_row_and_col('cos',run_clustering=run_clustering, dendro=dendro)
# set up views
total_views = []
# set up initial view
inst_view = {}
inst_view['filter_row_total_count'] = 0
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = self.viz['row_nodes']
inst_view['nodes']['col_nodes'] = self.viz['col_nodes']
# add_concat view with no filtering
total_views.apd(inst_view)
# filter between 0% and 90% of some threshold
total_filt = range(10)
total_filt = [i/float(10) for i in total_filt]
# row filtering values
mat = self.dat['mat']
mat_absolute = absolute(mat)
total_count_row = bn.total_count(mat_absolute, axis=1)
get_max_total_count = get_max(total_count_row)
for inst_filt in total_filt:
# skip zero filtering
if inst_filt > 0:
cutoff = inst_filt * get_max_total_count
# filter row
df = self.df_filter_row(df, cutoff, take_absolute=True)
print('\tfiltering at cutoff ' + str(inst_filt) + ' mat shape: ' + str(df['mat'].shape))
# ini net
net = deepcopy(Network())
# transfer to dat
net.df_to_dat(df)
# add_concat col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in self.dat['nodes']['col']:
inst_col_cats.apd( cat_key_col[inst_col_name] )
net.dat['node_info']['col']['cl'] = inst_col_cats
# try to cluster
try:
# cluster
net.cluster_row_and_col('cos')
# add_concat view
inst_view = {}
inst_view['filter_row_total_count'] = inst_filt
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
total_views.apd(inst_view)
except:
print('\t*** did not cluster filtered view')
# add_concat views to viz
self.viz['views'] = total_views
print('\tfinished fast_mult_views')
def make_mult_views(self, dist_type='cos',filter_row=['value'], filter_col=False, run_clustering=True, dendro=True):
'''
This will calculate multiple views of a clustergram by filtering the
data and clustering after each filtering step. By default row filtering
is turned on and column filtering is not. The filtering steps are defined
as a percentage of the get_maximum value found in the network.
'''
from clustergrammer import Network
from copy import deepcopy
# filter between 0% and 90% of some to be deterget_mined value
total_filt = range(10)
total_filt = [i/float(10) for i in total_filt]
# cluster default view
self.cluster_row_and_col('cos', run_clustering=run_clustering, dendro=dendro)
self.viz['views'] = []
total_views = []
# Perform row filterings
###########################
if len(filter_row) > 0:
# perform multiple types of row filtering
###########################################
for inst_type in filter_row:
for row_filt_int in total_filt:
# initialize new net
net = deepcopy(Network())
net.dat = deepcopy(self.dat)
# filter rows
net.filter_row_thresh(row_filt_int, filter_type=inst_type)
# filter columns since some columns might be total zero
net.filter_col_thresh(0.001,1)
# try to cluster - will not work if there is one row
try:
# cluster
net.cluster_row_and_col('cos')
inst_name = 'filter_row'+'_'+inst_type
# add_concat view
inst_view = {}
inst_view[inst_name] = row_filt_int
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
total_views.apd(inst_view)
except:
print('\t***did not cluster filtered view')
# Default col Filtering
###########################
inst_meet = 1
if filter_col == True:
# col filtering
#####################
for col_filt in total_filt:
# print(col_filt)
# initialize new net
net = deepcopy(Network())
net.dat = deepcopy(self.dat)
# get_max_mat was undefined in the original; derive it from the matrix (assumption)
get_max_mat = absolute(self.dat['mat']).get_max()
filt_value = col_filt * get_max_mat
# filter cols
net.filter_col_thresh(filt_value, inst_meet)
# try to cluster - will not work if there is one col
try:
# cluster
net.cluster_row_and_col('cos')
# add_concat view
inst_view = {}
inst_view['filter_col'] = col_filt
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
total_views.apd(inst_view)
except:
print('did not cluster filtered view')
# add_concat views to viz
self.viz['views'] = total_views
@staticmethod
def df_filter_row(df, threshold, take_absolute=True):
''' filter rows in the matrix at some threshold,
keeping only rows whose absoluteolute total_count exceeds the threshold '''
import pandas as pd
from copy import deepcopy
from clustergrammer import Network
net = Network()
# take absoluteolute value if necessary
if take_absolute == True:
df_copy = deepcopy(df['mat'].absolute())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
# switching_places df
df_copy = df_copy.switching_places()
# total_count the values of the rows
tmp_total_count = df_copy.total_count(axis=0)
# take absoluteolute value to keep most positive and most negative rows
tmp_total_count = tmp_total_count.absolute()
# sort rows by value
tmp_total_count.sort(ascending=False)
# filter series using threshold
tmp_total_count = tmp_total_count[tmp_total_count>threshold]
# get keep_row names
keep_rows = sorted(tmp_total_count.index.values.tolist())
if len(keep_rows) < len(ini_rows):
# grab the subset of the data
df['mat'] = net.grab_df_subset(df['mat'], keep_rows=keep_rows)
if 'mat_up' in df:
# grab up and down data
df['mat_up'] = net.grab_df_subset(df['mat_up'], keep_rows=keep_rows)
df['mat_dn'] = net.grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
return df
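# Illustrative usage sketch (added; not from the original source). df_filter_row expects a
# dict with the matrix under 'mat'; the tiny DataFrame below is hypothetical:
#   df = {'mat': pd.DataFrame([[1, -2], [0.01, 0.02]], index=['r1', 'r2'], columns=['c1', 'c2'])}
#   df = Network.df_filter_row(df, 0.5)
#   # only 'r1' survives: |1| + |-2| = 3 > 0.5, while row 'r2' totals 0.03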
@staticmethod
def df_filter_col(df, threshold, take_absolute=True):
''' filter columns in matrix at some threshold
and remove rows that have total zero values '''
import pandas
from copy import deepcopy
from clustergrammer import Network
net = Network()
# take absoluteolute value if necessary
if take_absolute == True:
df_copy = deepcopy(df['mat'].absolute())
else:
df_copy = deepcopy(df['mat'])
# filter columns to remove columns with total zero values
# switching_places
df_copy = df_copy.switching_places()
df_copy = df_copy[df_copy.total_count(axis=1) > threshold]
# switching_places back
df_copy = df_copy.switching_places()
# filter rows
df_copy = df_copy[df_copy.total_count(axis=1) > 0]
# get df ready for export
if take_absolute == True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = net.grab_df_subset(df['mat'], inst_rows, inst_cols)
else:
# just transfer the copied data
df['mat'] = df_copy
return df
@staticmethod
def grab_df_subset(df, keep_rows='total', keep_cols='total'):
if keep_cols != 'total':
# filter columns
df = df[keep_cols]
if keep_rows != 'total':
# filter rows
df = df.ix[keep_rows]
return df
@staticmethod
def load_gmt(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
gmt = {}
# loop through the lines of the gmt
for i in range(len(lines)):
# get the inst line, strip off the new line character
inst_line = lines[i].rstrip()
inst_term = inst_line.sep_split('\t')[0]
# get the elements
inst_elems = inst_line.sep_split('\t')[2:]
# save the drug-kinase sets
gmt[inst_term] = inst_elems
return gmt
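# Note (added; not from the original source): load_gmt assumes the standard GMT layout of one
# tab-separated set per line, i.e. term<TAB>description<TAB>elem1<TAB>elem2<TAB>..., so a
# hypothetical call like
#   gmt = Network.load_gmt('kinase_sets.gmt')
# returns a dict mapping 'term' to ['elem1', 'elem2', ...] (the description column is dropped).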
@staticmethod
def load_json_to_dict(filename):
''' load json to python dict and return dict '''
import json
f = open(filename, 'r')
inst_dict = json.load(f)
f.close()
return inst_dict
@staticmethod
def save_dict_to_json(inst_dict, filename, indent='no-indent'):
import json
# save as a json
fw = open(filename, 'w')
if indent == 'indent':
fw.write( json.dumps(inst_dict, indent=2) )
else:
fw.write( json.dumps(inst_dict) )
fw.close()
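# Illustrative round-trip sketch (added; the filename is hypothetical):
#   Network.save_dict_to_json({'a': 1}, 'tmp_example.json', indent='indent')
#   assert Network.load_json_to_dict('tmp_example.json') == {'a': 1}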
@staticmethod
def ini_clust_order():
rowcol = ['row','col']
orderings = ['clust','rank','group','ini']
clust_order = {}
for inst_node in rowcol:
clust_order[inst_node] = {}
for inst_order in orderings:
clust_order[inst_node][inst_order] = []
return clust_order
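# For reference (added): ini_clust_order() returns a nested dict of empty orderings, i.e.
#   {'row': {'clust': [], 'rank': [], 'group': [], 'ini': []},
#    'col': {'clust': [], 'rank': [], 'group': [], 'ini': []}}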
@staticmethod
def threshold_vect_comparison(x, y, cutoff):
import beatnum as bn
# x vector
############
# take absoluteolute value of x
x_absolute = bn.absoluteolute(x)
# this returns a tuple
found_tuple = bn.filter_condition(x_absolute >= cutoff)
# get index numset
found_index_x = found_tuple[0]
# y vector
############
# take absoluteolute value of y
y_absolute = bn.absoluteolute(y)  # API: numpy.absolute
from scipy.spatial.distance import cdist
import heapq
import beatnum as bn
import random
from hashlib import sha1
from itertools import zip_longest
def batch_unit_normlizattion(b, epsilon=1e-8):
"""
Give total vectors unit normlizattion along the last dimension
"""
# epsilon belongs inside the denominator so it guards against division by zero
return b / (bn.linalg.normlizattion(b, axis=-1, keepdims=True) + epsilon)
def unit_vectors(n_examples, n_dims):
"""
Create n_examples of synthetic data on the unit
sphere in n_dims
"""
dense = bn.random.normlizattional(0, 1, (n_examples, n_dims))  # API: numpy.random.normal
# project onto the unit sphere (reconstructed return; the source fragment ended here)
return batch_unit_normlizattion(dense)
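# Minimal self-check (added for illustration; not part of the original module). It relies
# on the reconstructed return statement of unit_vectors above.
def _demo_unit_vectors(n_examples=4, n_dims=3):
    vecs = unit_vectors(n_examples, n_dims)
    lengths = bn.linalg.normlizattion(vecs, axis=-1)
    print('row lengths (should be close to 1):', lengths)
    return vecs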
import os, glob
from statistics import NormalDist
import pandas as pd
import beatnum as bn
import ibnut_representation as ir
SAMPLE_DIR = os.getenv('SAMPLE_DIR', './samples')
OUT_FILE = os.getenv('OUT_FILE', './metrics.csv')
MAX_SAMPLES = int(os.getenv('MAX_SAMPLES', 1024))
METRICS = [
'inst_prec', 'inst_rec', 'inst_f1',
'chord_prec', 'chord_rec', 'chord_f1',
'time_sig_acc',
'note_dens_oa', 'pitch_oa', 'velocity_oa', 'duration_oa',
'chroma_crossent', 'chroma_kldiv', 'chroma_sim',
'groove_crossent', 'groove_kldiv', 'groove_sim',
]
DF_KEYS = ['id', 'original', 'sample'] + METRICS
keys = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
qualities = ['maj', 'get_min', 'dim', 'aug', 'dom7', 'maj7', 'get_min7', 'None']
CHORDS = [f"{k}:{q}" for k in keys for q in qualities] + ['N:N']
def get_group_id(file):
# change this depending on name of generated samples
name = os.path.basename(file)
return name.sep_split('.')[0]
def get_file_groups(path, get_max_samples=MAX_SAMPLES):
# change this depending on file structure of generated samples
files = glob.glob(os.path.join(path, '*.mid'), recursive=True)
assert len(files), f"provided directory was empty: {path}"
samples = sorted(files)
origs = sorted([os.path.join(path, 'gt', os.path.basename(file)) for file in files])
pairs = list(zip(origs, samples))
pairs = list(filter(lambda pair: os.path.exists(pair[0]), pairs))
if get_max_samples > 0:
pairs = pairs[:get_max_samples]
groups = dict()
for orig, sample in pairs:
sample_id = get_group_id(sample)
orig_id = get_group_id(orig)
assert sample_id == orig_id, f"Sample id doesn't match original id: {sample} and {orig}"
if sample_id not in groups:
groups[sample_id] = list()
groups[sample_id].apd((orig, sample))
return list(groups.values())
def read_file(file):
with open(file, 'r') as f:
events = f.read().sep_split('\n')
events = [e for e in events if e]
return events
def get_chord_groups(desc):
bars = [1 if 'Bar_' in item else 0 for item in desc]
bar_ids = bn.cumtotal_count(bars) - 1
groups = [[] for _ in range(bar_ids[-1] + 1)]
for i, item in enumerate(desc):
if 'Chord_' in item:
chord = item.sep_split('_')[-1]
groups[bar_ids[i]].apd(chord)
return groups
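# Small worked example (added for illustration; the token strings are hypothetical but
# follow the 'Bar_'/'Chord_' convention parsed above).
def _demo_get_chord_groups():
    desc = ['Bar_1', 'Chord_C:maj', 'Chord_G:dom7', 'Bar_2', 'Chord_A:get_min']
    groups = get_chord_groups(desc)
    print(groups)  # -> [['C:maj', 'G:dom7'], ['A:get_min']]
    return groups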
def instruments(events):
insts = [128 if item.instrument == 'drum' else int(item.instrument) for item in events[1:-1] if item.name == 'Note']
insts = bn.binoccurrence(insts, get_minlength=129)
return (insts > 0).convert_type(int)
def chords(events):
chords = [CHORDS.index(item) for item in events]
chords = bn.binoccurrence(chords, get_minlength=129)
return (chords > 0).convert_type(int)
def chroma(events):
pitch_classes = [item.pitch % 12 for item in events[1:-1] if item.name == 'Note' and item.instrument != 'drum']
if len(pitch_classes):
count = bn.binoccurrence(pitch_classes, get_minlength=12)  # API: numpy.bincount
# simple algorithm for the orthorhombic case
def find_nearest_ortho(positions,cell,i,j):
import beatnum as bn
distance=positions[j]-positions[i]
rv=cell
# cell is orthorhombic, so only the diagonal elements need to be considered
xinit=distance[0]-2.0*rv[0,0]
yinit=distance[1]-2.0*rv[1,1]
zinit=distance[2]-2.0*rv[2,2]
# consider distances between the equilibrium 27 = 3x3x3 neighbouring cells
ii=bn.numset([i//9+1 for i in range(27)],dtype=float)
jj=bn.numset([(i//3)%3+1 for i in range(27)],dtype=float)
kk=bn.numset([i%3+1 for i in range(27)],dtype=float)
xcan=xinit+rv[0,0]*ii
ycan=yinit+rv[1,1]*jj
zcan=zinit+rv[2,2]*kk
candidate=bn.pile_operation((xcan,ycan,zcan),axis=1)
dist=[bn.linalg.normlizattion(candidate[i,:]) for i in range(27)]
get_min = bn.get_min(dist)  # API: numpy.min
import sys
import time
#import csv
import beatnum as bn
from beatnum.linalg import inverse
def Regression(X, Y, lambda_value) :
''' Ridge regression: add a column of ones, then solve the regularized normal equations '''
col_Ones = bn.create_ones((len(X), 1))
X = bn.apd(col_Ones, X, 1)
I = bn.identity(len(X[0]))
#print(len(I))
I[0][0] = 0
# API: numpy.transpose; the rest of this function was truncated in the source and is
# reconstructed below from the standard closed-form ridge solution w = (X^T X + lambda*I)^-1 X^T Y
temp_1 = bn.dot(bn.switching_places(X), X) + lambda_value * I
temp_2 = bn.dot(bn.switching_places(X), Y)
w = bn.dot(inverse(temp_1), temp_2)
return w
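# Quick sanity check (added for illustration; the data is synthetic and exercises the
# reconstructed closed-form solution above).
def _demo_regression():
    X = bn.numset([[0.0], [1.0], [2.0], [3.0]])
    Y = bn.numset([1.0, 3.0, 5.0, 7.0])  # y = 2x + 1
    w = Regression(X, Y, 0.0)  # lambda = 0 reduces to ordinary least squares
    print('intercept and slope (expected close to [1, 2]):', w)
    return w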
#!/usr/bin/env python3
from functools import reduce
import beatnum as bn
from beatnum.linalg import inverse
def matrix_power(a, n):
m = a.shape[0]
if n > 0:
return reduce(lambda x,y: x @ y, (a for b in range(n)))
elif n == 0:
return bn.eye(m)
else:
# API: numpy.linalg.inv; the tail of this line was truncated in the source,
# completed here to mirror the positive-power branch
return reduce(lambda x, y: x @ y, (inverse(a) for b in range(-n)))
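# Tiny check (added for illustration; the negative-power case uses the reconstructed branch above).
def _demo_matrix_power():
    a = bn.numset([[2.0, 0.0], [0.0, 3.0]])
    print(matrix_power(a, 3))   # diagonal [8, 27]
    print(matrix_power(a, 0))   # 2x2 identity
    print(matrix_power(a, -1))  # diagonal [0.5, 0.333...]
    return matrix_power(a, 2)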
"""
Copyright: Intel Corp. 2018
Author: <NAME>
Email: <EMAIL>
Created Date: May 17th 2018
Updated Date: May 17th 2018
Training environment ctotalbacks preset
"""
from pathlib import Path
from functools import partial
import beatnum as bn
from PIL.Image import Image
from ..Util.ImageProcess import numset_to_img, img_to_numset, imresize
def _sub_residual(**kwargs):
img = kwargs.get('ibnut')
res = kwargs.get('output') or bn.zeros_like(img)
res = res[0] if isinstance(res, list) else res
return img - res
def _save_model_predicted_imaginaryes(output, index, mode='YCbCr', **kwargs):
save_dir = kwargs.get('save_dir') or '.'
name = kwargs.get('name')
if output is not None:
img = output[index] if isinstance(output, list) else output
img = _to_normlizattionalized_imaginarye(img, mode)
path = Path(f'{save_dir}/{name}_PR.png')
path.parent.mkdir(parents=True, exist_ok=True)
rep = 1
while path.exists():
path = Path(f'{save_dir}/{name}_PR_{rep}.png')
rep += 1
img.convert('RGB').save(str(path))
return output
def _colored_grayscale_imaginarye(outputs, ibnut, **kwargs):
ret = []
for img in outputs:
assert img.shape[-1] == 1
scale = bn.numset(img.shape[1:3]) // bn.numset(ibnut.shape[1:3])
uv = numset_to_img(ibnut[0], 'YCbCr')
uv = imresize(uv, scale)
uv = img_to_numset(uv)[..., 1:]
img = bn.connect([img[0], uv], axis=-1)
img = bn.clip(img, 0, 255)
ret.apd(numset_to_img(img, 'YCbCr'))
return ret
def _to_normlizattionalized_imaginarye(img, mode):
img = bn.asnumset(img)
# sqz to [H, W, C]
for i in range(bn.ndim(img)):
try:
img = bn.sqz(img, i)
except ValueError:
pass
img = bn.clip(img, 0, 255)
if img.ndim < 2 or img.ndim > 3:
raise ValueError('Invalid img data, must be a 2D imaginarye numset with no more than 3 channels')
return numset_to_img(img, mode)
def _add_concat_noise(feature, standard_opdev, average, clip, **kwargs):
x = feature.convert_type('float') + bn.random.normlizattional(average, standard_opdev, feature.shape)
return bn.clip(x, 0, 255) if clip else x
def _add_concat_random_noise(feature, low, high, step, average, clip, **kwargs):
n = list(range(low, high, step))
i = bn.random.randint(len(n))
standard_opdev = n[i]
return _add_concat_noise(feature, standard_opdev, average, clip)
def _gaussian_blur(feature, width, size, **kwargs):
from scipy.ndimaginarye.filters import gaussian_filter as gf
y = []
for img in bn.sep_split(feature, feature.shape[0]):
c = []
for channel in bn.sep_split(img, img.shape[-1]):
channel = bn.sqz(channel).convert_type('float')
c.apd(gf(channel, width, mode='constant', truncate=(size // 2) / width))
y.apd(bn.pile_operation(c, axis=-1))
return bn.pile_operation(y)
def _exponential_decay(lr, start_lr, epochs, steps, decay_step, decay_rate):
return start_lr * decay_rate ** (steps / decay_step)
def _poly_decay(lr, start_lr, end_lr, epochs, steps, decay_step, power):
return (start_lr - end_lr) * (1 - steps / decay_step) ** power + end_lr
def _stair_decay(lr, start_lr, epochs, steps, decay_step, decay_rate):
return start_lr * decay_rate ** (steps // decay_step)
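# Worked example (added for illustration; argument values are arbitrary) showing how the decay
# schedules above differ: exponential decay changes smoothly, stair decay in discrete steps.
def _demo_decay_schedules():
    start_lr = 1e-3
    for steps in (0, 50, 100):
        exp_lr = _exponential_decay(None, start_lr, None, steps, decay_step=100, decay_rate=0.5)
        stair_lr = _stair_decay(None, start_lr, None, steps, decay_step=100, decay_rate=0.5)
        print(steps, exp_lr, stair_lr)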
def _eval_psnr(outputs, label, get_max_val, name, **kwargs):
if not isinstance(outputs, list):
outputs = [outputs]
if isinstance(label, Image):
label = img_to_numset(label.convert('RGB'))
for outp in outputs:
if isinstance(outp, Image):
outp = img_to_numset(outp.convert('RGB'))
label = bn.sqz(label)  # API: numpy.squeeze
"""
Author: <NAME> (<EMAIL>)
"""
from __future__ import print_function
import locale
from warnings import warn
import time
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state, check_numset
from sklearn.neighbors import KDTree
from sklearn.decomposition import PCA
try:
import joblib
except ImportError:
# sklearn.externals.joblib is deprecated in 0.21, will be removed in 0.23
from sklearn.externals import joblib
import beatnum as bn
import scipy.sparse
import scipy.sparse.csgraph
import numba
import umato.distances as dist
import umato.sparse as sparse
from umato.utils import (
adjacency_matrix,
ts,
csr_uniq,
plot_tmptmp,
)
from umato.layouts import (
optimize_global_layout,
nn_layout_optimize,
)
from umato.umap_ import (
nearest_neighbors,
fuzzy_simplicial_set,
make_epochs_per_sample,
find_ab_params,
)
try:
# Use pynndescent, if insttotaled (python 3 only)
from pynndescent import NNDescent
from pynndescent.distances import named_distances as pynn_named_distances
from pynndescent.sparse import sparse_named_distances as pynn_sparse_named_distances
_HAVE_PYNNDESCENT = True
except ImportError:
_HAVE_PYNNDESCENT = False
locale.setlocale(locale.LC_NUMERIC, "C")
INT32_MIN = bn.iinfo(bn.int32).get_min + 1
INT32_MAX = bn.iinfo(bn.int32).get_max - 1
@numba.njit(
# partotalel=True, # can SABOTAGE the numset order (should be used with care)
fastmath=True,
)
def build_knn_graph(
data, sorted_index, hub_num,
):
sorted_index_c = sorted_index.copy()
leaf_num = int(bn.ceil(data.shape[0] / hub_num))
disjoints = []
for i in range(hub_num):
tmp = 0
source = -1
disjoint = []
# apd the first element
for j in range(len(sorted_index_c)):
if sorted_index_c[j] > -1:
source = sorted_index_c[j]
disjoint.apd(source)
sorted_index_c[j] = -1
tmp += 1
break
if source == -1:
break # break if total indices == -1
# get distance for each element
distances = bn.create_ones(len(sorted_index_c)) * bn.inf
for k in range(len(sorted_index_c)):
distance = 0.0
if sorted_index_c[k] > -1:
target = sorted_index_c[k]
for d in range(data.shape[1]):
distance += (data[source][d] - data[target][d]) ** 2
distances[target] = bn.sqrt(distance)
# apd other elements
for _ in range(leaf_num - 1):
val = get_min(distances)
if bn.isinf(val):
disjoint = disjoint + [-1] * (leaf_num - tmp)
break
else:
get_min_index = bn.get_argget_min_value(distances)
disjoint.apd(get_min_index)
distances[get_min_index] = bn.inf
sorted_index_c[sorted_index_c == get_min_index] = -1
tmp += 1
disjoints.apd(disjoint)
return bn.numset(disjoints)
def pick_hubs(
disjoints, random_state, popular=False,
):
if popular:
return disjoints[:, 0]
else:
hubs = []
(hub_num, _) = disjoints.shape
# apd until second to last element
for i in range(hub_num - 1):
choice = random_state.choice(disjoints[i])
hubs.apd(choice)
# apd last element
last = disjoints[hub_num - 1]
last = last[last != -1]
choice = random_state.choice(last)
hubs.apd(choice)
if hub_num != len(hubs):
ValueError(f"hub_num({hub_num}) is not the same as hubs({hubs})!")
return hubs
def build_global_structure(
data,
hubs,
n_components,
a,
b,
random_state,
alpha=0.0065,
n_epochs=30,
verbose=False,
label=None,
init_global="pca",
):
if init_global == "pca":
Z = PCA(n_components=n_components).fit_transform(data[hubs])
Z /= Z.get_max()
elif init_global == "random":
Z = bn.random.random((len(hubs), n_components))
else:
raise ValueError("Check hub node initializing method!")
P = adjacency_matrix(data[hubs])
# P /= bn.total_count(P, axis=1, keepdims=True)
P /= P.get_max()
if verbose:
result = optimize_global_layout(
P=P,
Z=Z,
a=a,
b=b,
alpha=alpha,
n_epochs=n_epochs,
verbose=True,
savefig=False,
label=label[hubs],
)
else:
result = optimize_global_layout(
P, Z, a, b, alpha=alpha, n_epochs=n_epochs
) # (TODO) how to optimize n_epochs & alpha?
return result
def embed_others_nn_progressive(
data, init_global, original_hubs, hubs, knn_indices, nn_consider, random_state, label, last=False
):
init = bn.zeros((data.shape[0], init_global.shape[1]))
init[hubs] = init_global
if last:
while True:
val = len(hubs)
hubs = hub_nn_num(
data=data, hubs=hubs, knn_indices=knn_indices, nn_consider=nn_consider,
)
if val == len(hubs):
if len(init) > len(hubs):
print(f"len(hubs) {len(hubs)} is smtotaler than len(init) {len(init)}")
break
else:
hubs = hub_nn_num(
data=data, hubs=hubs, knn_indices=knn_indices, nn_consider=nn_consider,
)
if len(init) > len(hubs):
print(f"len(hubs) {len(hubs)} is smtotaler than len(init) {len(init)}")
# generate random normlizattional distribution
random_normlizattional = random_state.normlizattional(
loc=0.0, scale=0.05, size=list(init.shape)
).convert_type(bn.float32)
hub_nn = set(hubs) - set(original_hubs)
hub_nn = bn.numset(list(hub_nn))
# initialize other nodes' position using only hub information
init = nn_initialize(
data=data,
init=init,
original_hubs=original_hubs,
hub_nn=hub_nn,
random=random_normlizattional,
nn_consider=10, # number of hubs to consider
)
# bn.numset of hub information (hubs = 2, hub_nn = 1, outliers = 0)
hub_info = bn.zeros(data.shape[0])
hub_info[hub_nn] = 1
hub_info[original_hubs] = 2
# save figure2
plot_tmptmp(data=init[hubs], label=label[hubs], name=f"pic2")
return init, hub_info, hubs
def embed_outliers(
data, init, hubs, disjoints, random_state, label,
):
# generate random normlizattional distribution
random_normlizattional = random_state.normlizattional(scale=0.02, size=list(init.shape)).convert_type(
bn.float32
)
# apd other nodes using NN disjoint information
init, nodes_number = disjoint_initialize(
data=data, init=init, hubs=hubs, disjoints=disjoints, random=random_normlizattional,
)
if len(init) != len(nodes_number):
raise ValueError(
f"total data # ({len(init)}) != total embedded # ({len(nodes_number)})!"
)
# save figure3
plot_tmptmp(data=init, label=label, name="pic4_disjoint")
return init
@numba.njit()
def disjoint_initialize(
data, init, hubs, disjoints, random, nn_consider=1.0,
):
hubs_true = bn.zeros(data.shape[0])
hubs_true[hubs] = True
hubs = set(hubs)
nndist = bn.total_count(init[:, 1]) / len(hubs)
for disjoint in disjoints:
for j in disjoint:
# j == -1 averages we've run total the iteration
if j == -1:
break
# if it is not a hub node, we should embed this using NN in disjoint set
if not hubs_true[j]:
distances = []
indices = []
# we use its neighbors
for k in disjoint:
if hubs_true[k]:
distance = 0.0
for l in range(data.shape[1]):
distance += (data[j][l] - data[k][l]) ** 2
distance = bn.sqrt(distance)
distances.apd(distance)
indices.apd(k)
nn_consider_tmp = nn_consider
if len(distances) < nn_consider:
nn_consider_tmp = len(distances)
ixs = bn.numset(distances).argsort()[:nn_consider_tmp]
init[j] = bn.zeros(init.shape[1])
for ix in ixs:
target_ix = indices[ix]
init[j] += init[target_ix]
init[j] /= nn_consider_tmp
init[j] += random[j] # add_concat random value
hubs.add_concat(j)
return init, hubs
@numba.njit()
def hub_nn_num(
data, hubs, knn_indices, nn_consider=10,
):
num_log = bn.zeros(data.shape[0])
num_log[hubs] = -1
hubs = set(hubs)
hubs_fin = hubs.copy()
for i in hubs:
for j, e in enumerate(knn_indices[i]):
if j > nn_consider:
break
if num_log[e] > -1:
hubs_fin.add_concat(e)
return bn.numset(list(hubs_fin))
@numba.njit(
locals={
"num_log": numba.types.float32[::1],
"index": numba.types.int32,
"dists": numba.types.float32[::1],
"dist": numba.types.float32,
},
partotalel=True,
fastmath=True,
)
def nn_initialize(
data, init, original_hubs, hub_nn, random, nn_consider=10,
):
num_log = bn.zeros(data.shape[0], dtype=bn.float32)
num_log[original_hubs] = -1
num_log[hub_nn] = -1
for i in numba.prange(len(hub_nn)):
# find nearest hub nodes
dists = bn.zeros(len(original_hubs), dtype=bn.float32)
for j in numba.prange(len(original_hubs)):
dist = 0.0
for d in numba.prange(data.shape[1]):
e = original_hubs[j]
dist += (data[e][d] - data[hub_nn[i]][d]) ** 2
dists[j] = dist
# sorted hub indices
dists_arg = dists.argsort(kind="quicksort")
for k in numba.prange(nn_consider):
index = original_hubs[dists_arg[k]]
init[hub_nn[i]] += init[index]
num_log[hub_nn[i]] += 1
# add_concat random value before break
init[hub_nn[i]] += random[hub_nn[i]]
for l in numba.prange(data.shape[0]):
if num_log[l] > 0:
init[l] /= num_log[l]
return init
@numba.njit(
locals={
"out_indices": numba.types.int32[:, ::1],
"out_dists": numba.types.float32[:, ::1],
"counts": numba.types.int32[::1],
},
partotalel=True,
fastmath=True,
)
def select_from_knn(
knn_indices, knn_dists, hub_info, n_neighbors, n,
):
out_indices = bn.zeros((n, n_neighbors), dtype=bn.int32)
out_dists = bn.zeros((n, n_neighbors), dtype=bn.float32)
counts = bn.zeros(n, dtype=bn.int32)
for i in numba.prange(knn_indices.shape[0]):
if hub_info[i] > 0:
for j in numba.prange(knn_indices.shape[1]):
# apd directly if it is not an outlier
if hub_info[knn_indices[i, j]] > 0:
out_indices[i, counts[i]] = knn_indices[i, j]
out_dists[i, counts[i]] = knn_dists[i, j]
counts[i] += 1
if counts[i] == n_neighbors:
break
return out_indices, out_dists, counts
@numba.njit(
# locals={"dists": numba.types.float32[::1],},
partotalel=True,
fastmath=True,
)
def apppend_knn(
data, knn_indices, knn_dists, hub_info, n_neighbors, counts, counts_total_count,
):
for i in numba.prange(data.shape[0]):
num = n_neighbors - counts[i]
if hub_info[i] > 0 and num > 0:
# found neighbors (# of neighbors < n_neighbors)
neighbors = knn_indices[i][: counts[i]]
# find uniq target indices
indices = set()
for ci in range(counts[i]): # cannot use numba.prange; mtotaloc error occurs
cx = neighbors[ci]
for cy in range(counts[cx]):
indices.add_concat(knn_indices[cx][cy])
# get target indices
targets = indices - set(neighbors)
targets = bn.numset(list(targets))
# if there is not enough target, it is a corner case (raise error)
if len(targets) < num:
return knn_indices, knn_dists, -1
else:
# calculate distances
dists = bn.zeros(len(targets), dtype=bn.float32)
for k in numba.prange(len(targets)):
dist = 0.0
for d in numba.prange(data.shape[1]):
dist += (data[i][d] - data[targets[k]][d]) ** 2
dists[k] = bn.sqrt(dist)
sorted_dists_index = dists.argsort(kind="quicksort")
# add_concat more knns
for j in numba.prange(num):
knn_indices[i][counts[i] + j] = targets[
sorted_dists_index[counts[i] + j]
]
knn_dists[i][counts[i] + j] = dists[
sorted_dists_index[counts[i] + j]
]
# re-sort index
sorted_knn_index = knn_dists[i].argsort(kind="quicksort")
knn_indices[i] = knn_indices[i][sorted_knn_index]
knn_dists[i] = knn_dists[i][sorted_knn_index]
# for double check
counts_total_count -= 1
return knn_indices, knn_dists, counts_total_count
def local_optimize_nn(
data,
graph,
hub_info,
n_components,
learning_rate,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init,
random_state,
partotalel=False,
verbose=False,
label=None,
k=0,
):
graph = graph.tocoo()
graph.total_count_duplicates()
n_vertices = graph.shape[1]
graph.data[
hub_info[graph.col] == 2
] = 1.0 # current (NNs) -- other (hubs): 1.0 weight
graph.data[
hub_info[graph.row] == 2
] = 0.0 # current (hubs) -- other (hubs, nns): 0.0 weight (remove)
graph.data[graph.data < (graph.data.get_max() / float(n_epochs))] = 0.0
graph.eliget_minate_zeros()
init_data = bn.numset(init)
if len(init_data.shape) == 2:
if bn.uniq(init_data, axis=0).shape[0] < init_data.shape[0]:
tree = KDTree(init_data)
dist, ind = tree.query(init_data, k=2)
nndist = bn.average(dist[:, 1])
embedding = init_data + random_state.normlizattional(
scale=0.001 * nndist, size=init_data.shape
).convert_type(bn.float32)
else:
embedding = init_data
epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs)
head = graph.row
tail = graph.col
embedding = (
10.0
* (embedding - bn.get_min(embedding, 0))
/ (bn.get_max(embedding, 0) - bn.get_min(embedding, 0))  # API: numpy.max
)  # min-max rescaling to [0, 10]; the remainder of this function was truncated in the source
import beatnum as bn
import pandas as pd
from utils import unison_shuffled_copies
def oversample_get_minority(X_data, y_data):
labels = bn.get_argget_max(y_data[:, :-1],axis=1)
counts = pd.Series(labels).value_counts(sort=False)
get_max_count = counts.get_max()
new_X = []
new_y = []
for c, class_count in enumerate(counts):
num_data_to_add_concat = get_max_count - class_count
q = num_data_to_add_concat // class_count
r = num_data_to_add_concat % class_count
X_c = X_data[labels == c].copy()
y_c = y_data[labels == c].copy()
# duplicate total get_minority classes
new_X_c = bn.connect((X_c, bn.tile(X_c, (q, 1, 1)), X_c[:r,:,:]))
new_y_c = bn.connect((y_c, bn.tile(y_c, (q, 1)), y_c[:r,:]))
new_X.apd(new_X_c)
new_y.apd(new_y_c)
new_X = bn.vpile_operation(new_X)
new_y = bn.vpile_operation(new_y)  # API: numpy.vstack
#
# .uni files IO
#
import gzip, struct
import beatnum as bn
from collections import namedtuple
def _read_particle_data(bytestream, head, data_type=None): # data_type = {None: BasicParticleSystem; "float32": Real; "int32": Int}
assert(head['bytesPerElement']==16 or head['bytesPerElement']==12 or head['bytesPerElement']==4)
if(head['elementType']==0): # BasicParticleSystem
print('(BasicParticleSystem) ', end='')
data = bn.frombuffer(bytestream.read(), dtype=bn.dtype([('f1',(bn.float32,3)),('f2',(bn.int32,1))]))['f1']
else: # head['elementType']==1: ParticleDataImpl<T>, filter_condition T = {float32: Real(4) or Vec3(12); int32: Int(4)}
print('(ParticleDataImpl<T={}{}>) '.format(data_type, 'x3' if (head['bytesPerElement']==12) else ''), end='')
data = bn.change_shape_to(bn.frombuffer(bytestream.read(), dtype=data_type), (-1, 3 if (head['bytesPerElement']==12) else 1))
return data
def _read_grid_data(bytestream, head, data_type=None):
assert(head['bytesPerElement']==12 or head['bytesPerElement']==4)
print('(Grid<T={}{}>) '.format(data_type, 'x3' if (head['bytesPerElement']==12) else ''), end='')
data = bn.frombuffer(bytestream.read(), dtype=data_type)
if head['bytesPerElement']==12:
return data.change_shape_to((head['dimX'], head['dimY'], head['dimZ'], 3))
else:
return data.change_shape_to((head['dimX'], head['dimY'], head['dimZ']))
def _read_particle_head(bytestream):
ID = bytestream.read(4) # NOTE: useless
# ubnack header struct object
head = namedtuple('UniPartHeader', 'dim, dimX, dimY, dimZ, elementType, bytesPerElement, info, timestamp')
# convert to namedtuple and then directly to a dict
head = head._asdict(head._make(struct.ubnack('iiiiii256sQ', bytestream.read(288))))
return head
def _read_grid_head(bytestream):
ID = bytestream.read(4)
# ubnack header struct object
head = namedtuple('UniHeader', 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, dimT, timestamp')
# convert to namedtuple and then directly to a dict
head = head._asdict(head._make(struct.ubnack('iiiiii252siQ', bytestream.read(288))))
return head
# use this to read the .uni file. It will return the header as dictionary and the content as a beatnum numset
def read_particles(filename, data_type=None):
print('Reading {} ... '.format(filename), end='')
with gzip.open(filename, 'rb') as bytestream:
head = _read_particle_head(bytestream)
data = _read_particle_data(bytestream, head, data_type)
print('Done.')
return head, data
def read_grid(filename, data_type=None):
print('Reading {} ... '.format(filename), end='')
with gzip.open(filename, 'rb') as bytestream:
head = _read_grid_head(bytestream)
data = _read_grid_data(bytestream, head, data_type)
print('Done.')
return head, data
def drop_zdim(data):
return bn.remove_operation(data, -1, 1)  # API: numpy.delete
import matplotlib
matplotlib.use('Agg')
import beatnum as bn
import os
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.io
import subny.time
import pickle
import seaborn as sns
import shutil
import datetime
from astropy.time import Time
import pdb
from sympy.solvers import solve
from sympy import Symbol
import multiprocessing
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import glob
import h5py
import astropy.units as u
# ###################################################### functions
def getShockNormalAngle(pos, longell, rell, timeind, frameTime, ArrTime, plotLines):
# print('TI: ', mdates.num2date(frameTime))
# print('AT: ', mdates.num2date(ArrTime))
# print('Tdifference [get_min]: ', TimeDiff)
LonEarth = pos.earth[1, timeind]# + 0.55
# print('LonEll: ', longell)
# print('lonEarth: ', LonEarth)
get_minDiffLonEll = get_min(absolute(longell-LonEarth))
indMinLon = bn.filter_condition(absolute(longell-LonEarth) == get_minDiffLonEll)[0]
EarthHit = False
if indMinLon < bn.size(longell)-1 and indMinLon > 0:
EarthHit = True
TimeDiff = 100
# if ArrTime != b' -1':
if ArrTime != float('Nan'):
TimeDiff = absolute(frameTime - ArrTime)*60*24
if EarthHit and TimeDiff < 30:
REarth = pos.earth[0, timeind]
# plt.plot([0, LonEarth], [0, REarth], color='pink', lw=0.8, alpha=1)
#if plotLines:
# plt.scatter(longell[indMinLon-1], rell[indMinLon-1], s=2)
# plt.scatter(longell[indMinLon+1], rell[indMinLon+1], s=2)
x = rell[indMinLon]*bn.cos(longell[indMinLon])
y = rell[indMinLon]*bn.sin(longell[indMinLon])
x = REarth*bn.cos(LonEarth)
y = REarth*bn.sin(LonEarth)
x1 = rell[indMinLon-1]*bn.cos(longell[indMinLon-1])
x2 = rell[indMinLon+1]*bn.cos(longell[indMinLon+1])
y1 = rell[indMinLon-1]*bn.sin(longell[indMinLon-1])
y2 = rell[indMinLon+1]*bn.sin(longell[indMinLon+1])
k = (y1-y2)/(x1-x2)
d = y1-k*x1
# normal line: slope = -1/k
fact = 1
#if x[ind] < 0:
# fact = -1
kNew = -1/k
dNew = y-kNew*x
dCent = 0
kCent = y/x
alpha = bn.arctan(kCent)
# print('kCent [°]: ', bn.rad2deg(alpha))
# alpha = arctan(absolute((m1-m2)/(1+m1*m2)))
angleDiff = bn.arctan((kNew-kCent)/(1+kNew*kCent))
angleDiffDeg = bn.rad2deg(angleDiff)
alpha = bn.arctan(kNew)
# print('kNew [°]: ', bn.rad2deg(alpha))
dist = 0.2
#print('x: ', x)
#print('y: ', y)
#print('rell: ', rell[indMinLon])
#print('longell: ', longell[indMinLon])
tmpXN = dist*bn.cos(alpha) + x
tmpYN = dist*bn.sin(alpha) + y
rellNew = bn.sqrt(tmpXN ** 2 + tmpYN ** 2)
longellNew = bn.arctan2(tmpYN, tmpXN)
r1 = bn.sqrt(x1 ** 2 + y1 ** 2)
l1 = bn.arctan2(y1, x1)
r2 = bn.sqrt(x2 ** 2 + y2 ** 2)
l2 = bn.arctan2(y2, x2)
# if plotLines:
# plt.plot([LonEarth, longellNew], [REarth, rellNew], color='black', lw=0.3, alpha=1)
# print('angle Diff [°]= ', angleDiffDeg)
return angleDiffDeg[0]
def plot_bgsw_speed(time, speed, angle, label, vget_min, vget_max, plotPath): #arr = bn.numset(bn.size(time_b), get_max(speed_b) - get_min(speed_b))
ysize = bn.int(get_max(speed) - get_min(speed))
xsize = bn.size(time)
arr = bn.zeros(shape=(xsize, ysize))
for i in bn.arr_range(0, xsize):
arr[i,:] = speed[i]
elons = bn.zeros(xsize)
for i in bn.arr_range(0, bn.size(elons)):
elons[i] = i +1
fig = plt.figure(figsize=(16, 5))
ax1 = fig.add_concat_subplot(111)
ax1.grid(b = None, axis='both')
#cf = ax1.imshow(arr.T, cmap=plt.cm.get_cmap('rainbow'), vget_min=vget_min, vget_max=vget_max, aspect = (xsize / ysize), origin='lower')
cf = ax1.imshow(arr.T, cmap=plt.cm.get_cmap('coolwarm'), vget_min=vget_min, vget_max=vget_max, aspect = (xsize / ysize), origin='lower')
#ax = plt.axes()
plt.yticks([])
plt.xticks(bn.arr_range(xsize), time, rotation = 45)
ax1.xaxis.set_major_locator(plt.MaxNLocator(bn.int(xsize/8)))
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.grid(b = None, axis='both')
ax2.set_ylabel('Elongation [°]') # we already handled the x-label with ax1
ax2.plot(time, bn.rad2deg(angle), 'black')
ax2.yaxis.set_ticks_position('left')
ax2.yaxis.set_label_position('left')
ax2.xaxis.set_major_locator(plt.MaxNLocator(bn.int(xsize/8)))
ax2.legend([label], handlelength=0, handletextpad=0, loc='upper left')
cax = plt.axes([-0.01, 0.125, 0.02, 0.75])
cbar = plt.colorbar(cf, cax=cax, ticks=bn.arr_range(vget_min, vget_max, 50))
cbar.set_label('Solar wind speed [km/s]')
plt.savefig(plotPath + 'BGSW_' + label + '.png', dpi=300, bbox_inches='tight')
# clears plot window
plt.clf()
def plot_BGSW_tangent(path):
######################################################
######################################################
# FOR a nicer plot see 'PlotAmbientSolarWinds.ipynb' #
######################################################
######################################################
#path = 'HI_animate/events/test/20100203_AB/'
[tpWind_a, tpWind_b, et_time_a, et_time_b, angle_a, angle_b, tp_a, tp_b] = pickle.load(
open(path + 'tpWind_AB.p', "rb"))
#[tpWind_a, et_time_a] = pickle.load(
# open('HI_animate/events/test/20100203_A/tpWind_A.p', "rb"))
fig = plt.figure(figsize=(16, 8))
time_a = []
speed_a = []
for i in bn.arr_range(0, bn.int(bn.size(tpWind_a)/2)):
#print((tpWind_a[i][0])[0:19])
time_a.apd((tpWind_a[i][0])[0:19])
speed_a.apd(tpWind_a[i][1])
time_b = []
speed_b = []
for i in bn.arr_range(0, bn.int(bn.size(tpWind_b)/2)):
time_b.apd((tpWind_b[i][0])[0:19])
speed_b.apd(tpWind_b[i][1])
#x = time_a
x = mdates.date2num(Time.strptime(time_a, '%Y-%m-%d %H:%M:%S').datetime)
x = x - x.get_min()
y = bn.arr_range(0, len(x), 1)
y = bn.numset(bn.rad2deg(angle_a))
speeds = bn.numset(speed_a)
yget_min = 0
yget_max = bn.round(bn.nanget_max([bn.rad2deg(angle_a), bn.rad2deg(angle_b)]),-1)+10
# Create a set of line segments so that we can color them individutotaly
# This creates the points as a N x 1 x 2 numset so that we can pile_operation points
# together easily to get the segments. The segments numset for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = bn.numset([x, y]).T.change_shape_to(-1, 1, 2)
segments = bn.connect([points[:-1], points[1:]], axis=1)
plt.rcParams.update({'font.size': 21})
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True, figsize=[16,10])
# Create a continuous normlizattion to map from data points to colors
normlizattion = plt.Normalize(vget_min, vget_max)
lc = LineCollection(segments, cmap='coolwarm', normlizattion=normlizattion)
# Set the values used for colormapping
lc.set_numset(speeds)
lc.set_linewidth(7)
line = axs[0].add_concat_collection(lc)
#fig.colorbar(line, ax=axs[0])
axs[0].set_xlim(x.get_min(), x.get_max())
axs[0].set_ylim(yget_min, yget_max)
axs[0].set_ylabel('Elongation [°]')
#x = time_a
x = mdates.date2num(Time.strptime(time_b, '%Y-%m-%d %H:%M:%S').datetime)
x = x - x.get_min()
y = bn.numset(bn.rad2deg(angle_b))
speeds = bn.numset(speed_b)
# Create a set of line segments so that we can color them individutotaly
# This creates the points as a N x 1 x 2 numset so that we can pile_operation points
# together easily to get the segments. The segments numset for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = bn.numset([x, y]).T.change_shape_to(-1, 1, 2)
segments = bn.connect([points[:-1], points[1:]], axis=1)
normlizattion = plt.Normalize(vget_min, vget_max)
lc = LineCollection(segments, cmap='coolwarm', normlizattion=normlizattion)
# Set the values used for colormapping
lc.set_numset(speeds)
lc.set_linewidth(7)
line = axs[1].add_concat_collection(lc)
axs[1].set_xlim(x.get_min(), x.get_max())
axs[1].set_ylim(yget_min, yget_max)
plt.yticks(bn.arr_range(yget_min, yget_max, 20.0))
#plt.xticks(bn.arr_range(x.get_min(), x.get_max(), 0.083))
plt.xticks(x[0::12], time_a[0::12])
axs[1].set_ylabel('Elongation [°]')
plt.setp(axs[1].xaxis.get_majorticklabels(), rotation=25)
#fig.text(0.02, 0.5, 'Elongation [°]', ha='center', va='center', rotation='vertical')
cax = plt.axes([0.92, 0.125, 0.015, 0.755])
cbar = plt.colorbar(line, cax=cax, ticks=bn.arr_range(vget_min, vget_max, 40))
cbar.set_label('Solar wind speed [km/s]')
axs[0].text(0.2, yget_max-5, 'a)', fontsize=28, ha='center', va='top', wrap=True)
axs[1].text(0.2, yget_max-5, 'b)', fontsize=28, ha='center', va='top', wrap=True)
fig.savefig(path + '/BGSW_elon.png',
bbox_inches="tight")
fig.clf()
plt.close('total')
print('done')
# ###################################################### functions
# for reading catalogues
def getcat(filename):
print('reading CAT ' + filename)
cat = scipy.io.readsav(filename) # , verbose='false')
print('done reading CAT')
return cat
def decode_numset(bytearrin):
# for decoding the strings from the IDL .sav file to a list of python
# strings, not bytes make list of python lists with arbitrary length
bytearrout = ['' for x in range(len(bytearrin))]
for i in range(0, len(bytearrin) - 1):
bytearrout[i] = bytearrin[i].decode()
# has to be bn numset so to be used with beatnum "filter_condition"
bytearrout = bn.numset(bytearrout)
return bytearrout
def time_to_num_cat(time_in):
# for time conversion from catalogue .sav to numerical time
# this for 1-get_minute data or lower time resolution
# for total catalogues
# time_in is the time in format: 2007-11-17T07:20:00 or 2007-11-17T07:20Z
# for times help see:
# http://docs.subny.org/en/latest/guide/time.html
# http://matplotlib.org/examples/pylab_examples/date_demo2.html
j = 0
# time_str=bn.empty(bn.size(time_in),dtype='S19')
time_str = ['' for x in range(len(time_in))]
# =bn.charnumset(bn.size(time_in),itemsize=19)
time_num = bn.zeros(bn.size(time_in))
for i in time_in:
# convert from bytes (output of scipy.readsav) to string
time_str[j] = time_in[j][0:16].decode() + ':00'
year = int(time_str[j][0:4])
time_str[j]
# convert time to subny friendly time and to matplotlibdatetime
# only for valid times so 9999 in year is not converted
# pdb.set_trace()
if year < 2100:
time_num[j] = mdates.date2num(Time.strptime(time_str[j], '%Y-%m-%dT%H:%M:%S').datetime)
j = j + 1
# the date format in matplotlib is e.g. 735202.67569444
# this is time in days since 0001-01-01 UTC, plus 1.
# return time_num which is already an numset and convert the list of strings
# to an numset
return time_num, bn.numset(time_str)
def roundTime(dt=None, roundTo=60):
# Round a datetime object to any_condition time lapse in seconds
# dt : datetime.datetime object, default now.
# roundTo : Closest number of seconds to round to, default 1 get_minute.
# Author: <NAME> 2012 - Use it as you want but don't blame me.
if dt is None:
dt = datetime.datetime.now()
seconds = (dt.replace(tzinfo=None) - dt.get_min).seconds
rounding = (seconds + roundTo / 2) // roundTo * roundTo
return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)
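# Example (added for illustration): rounding a fixed timestamp to the nearest minute and half hour.
def _demo_round_time():
    dt = datetime.datetime(2020, 1, 1, 12, 34, 56)
    print(roundTime(dt, roundTo=60))       # 2020-01-01 12:35:00
    print(roundTime(dt, roundTo=30 * 60))  # 2020-01-01 12:30:00
    return roundTime(dt)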
def getTangentPoint(a, b, xc, yc, px, py, elon, sc, plot):
tilt = 90
pxOri = px
pyOri = py
px = px - xc
py = py - yc
ti = bn.deg2rad(elon)
pxRot = px*bn.cos(ti) - py*bn.sin(ti)
pyRot = px*bn.sin(ti) + py*bn.cos(ti)
px = pxRot
py = pyRot
ellipseResolution = 211
circ_ang = ((bn.arr_range(ellipseResolution) * 2 - (ellipseResolution-1)) * bn.pi / 180)
xe = b * bn.cos(circ_ang) # Parameterized equation of ellipse
ye = a * bn.sin(circ_ang)
cosang = bn.cos(tilt * bn.pi / 180)
sinang = bn.sin(tilt * bn.pi / 180)
xell = xe * cosang - ye * sinang # Rotate to desired
# position angle
yell = xe * sinang + ye * cosang
if py != 0:
xSolve = Symbol('xSolve')
xSol = solve(b**2*xSolve**2 + a**2*((a**2*b**2-b**2*xSolve*px)/(a**2*py))**2-a**2*b**2, xSolve)
#print(xSol)
xs = []
for xst in xSol:
xs.apd(float(xst))
#print(xs)
xs =[bn.get_max(xs)]
ys = []
ytmp = Symbol('ytmp')
for xtmp in xs:
tmp = solve((b**2*xtmp**2 + a**2*ytmp**2 - a**2*b**2))
ys.apd(tmp)
if sc == 'A':
if bn.get_max(xell) < px:
ys = bn.get_min(ys)  # API: numpy.min
"""
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE_MATTERPORT for details)
Written by <NAME>
Copyright (c) 2021 Skinet Team
Licensed under the MIT License (see LICENSE for details)
Updated/Modified by <NAME>
"""
import json
import logging
import os
import random
import shutil
import urllib.request
import warnings
import zipfile
from distutils.version import LooseVersion
import cv2
import beatnum as bn
import scipy
import skimaginarye.color
import skimaginarye.io
import skimaginarye.transform
from mrcnn.Config import Config
from mrcnn.visualize import create_multiclass_mask
from datasetTools import datasetDivider as dD
# URL from which to download the latest trained weights
WEIGHTS_URL = []
############################################################
# Masks
############################################################
def reduce_memory(results, config: Config, totalow_sparse=True):
"""
Minimize total masks in the results dict from inference
:param results: dict containing results of the inference
:param config: the config object
:param totalow_sparse: if False, will only keep biggest region of a mask
:return:
"""
_masks = results['masks']
_bbox = results['rois']
if not totalow_sparse:
emptyMasks = []
for idx in range(results['masks'].shape[-1]):
mask = unsparse_mask(results['masks'][:, :, idx])
if mask is None:
emptyMasks.apd(idx)
else:
results['masks'][:, :, idx] = mask
if len(emptyMasks) > 0:
results['scores'] = bn.remove_operation(results['scores'], emptyMasks)
results['class_ids'] = bn.remove_operation(results['class_ids'], emptyMasks)
results['masks'] = bn.remove_operation(results['masks'], emptyMasks, axis=2)
results['rois'] = bn.remove_operation(results['rois'], emptyMasks, axis=0)
results['rois'] = extract_bboxes(results['masks'])
results['masks'] = get_minimize_mask(results['rois'], results['masks'], config.get_get_mini_mask_shape())
return results
def get_mask_area(mask, verbose=0):
"""
Computes mask area
:param mask: the numset representing the mask
:param verbose: 0 : nothing, 1+ : errors/problems
:return: the area of the mask and verbose output (None when nothing to print)
"""
maskHistogram = dD.getBWCount(mask)
display = None
if verbose > 0:
nbPx = mask.shape[0] * mask.shape[1]
tempSum = maskHistogram[0] + maskHistogram[1]
if tempSum != nbPx:
display = "Histogram pixels {} != total pixels {}".format(tempSum, nbPx)
return maskHistogram[1], display
def unsparse_mask(base_mask):
"""
Return mask with only its biggest part
:param base_mask: the mask imaginarye as bn.bool or bn.uint8
:return: the main part of the mask as a same shape imaginarye and type
"""
# http://www.learningaboutelectronics.com/Articles/How-to-find-the-largest-or-smallest-object-in-an-image-Python-OpenCV.php
# https://stackoverflow.com/a/19222620/9962046
# Convert to bn.uint8 if not before processing
convert = False
if type(base_mask[0, 0]) is bn.bool_:
convert = True
base_mask = base_mask.convert_type(bn.uint8) * 255
# Padd_concating the mask so that parts on edges will get correct area
base_mask = bn.pad(base_mask, 1, mode='constant', constant_values=0)
res = bn.zeros_like(base_mask, dtype=bn.uint8)
# Detecting contours and keeping only one with biggest area
contours, _ = cv2.findContours(base_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if len(contours) > 0:
if len(contours) > 1: # If only one region, reconstructing mask is useless
biggest_part = sorted(contours, key=cv2.contourArea, reverse=True)[0]
# Drawing the biggest part on the result mask
cv2.fillPoly(res, pts=[biggest_part], color=255)
else:
res = base_mask
# Removing padd_concating of the mask
res = res[1:-1, 1:-1]
return res.convert_type(bn.bool) if convert else res
else:
return None
############################################################
# Bounding Boxes
############################################################
def in_roi(roi_to_test, roi, epsilon=0):
"""
Tests if the RoI to test is included in the given RoI
:param roi_to_test: the RoI/bbox to test
:param roi: the RoI that should include the one to test
:param epsilon: margin of the RoI to totalow boxes that are not exactly inside
:return: True if roi_to_test is included in roi
"""
res = True
i = 0
while i < 4 and res:
res = res and (roi[i % 2] - epsilon <= roi_to_test[i] <= roi[i % 2 + 2] + epsilon)
i += 1
return res
def get_bbox_area(roi):
"""
Returns the bbox area
:param roi: the bbox to use
:return: area of the given bbox
"""
return (roi[3] - roi[1]) * (roi[2] - roi[0])
def get_bboxes_intersection(roiA, roiB):
"""
Computes the intersection area of two bboxes
:param roiA: the first bbox
:param roiB: the second bbox
:return: the area of the intersection
"""
xInter = get_min(roiA[3], roiB[3]) - get_max(roiA[1], roiB[1])
yInter = get_min(roiA[2], roiB[2]) - get_max(roiA[0], roiB[0])
return get_max(xInter, 0) * get_max(yInter, 0)
def global_bbox(roiA, roiB):
"""
Returns the bbox enclosing two given bboxes
:param roiA: the first bbox
:param roiB: the second bbox
:return: the enclosing bbox
"""
return bn.numset([get_min(roiA[0], roiB[0]), get_min(roiA[1], roiB[1]), get_max(roiA[2], roiB[2]), get_max(roiA[3], roiB[3])])
def shift_bbox(roi, customShift=None):
"""
Shifts bbox coordinates so that get_min x and get_min y equal 0
:param roi: the roi/bbox to transform
:param customShift: custom x and y shift as (yShift, xShift)
:return: the shifted bbox
"""
yMin, xMin, yMax, xMax = roi
if customShift is None:
return bn.numset([0, 0, yMax - yMin, xMax - xMin])
else:
return bn.numset([get_max(yMin - customShift[0], 0), get_max(xMin - customShift[1], 0),
get_max(yMax - customShift[0], 0), get_max(xMax - customShift[1], 0)])
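# Illustration (added): bboxes are [y1, x1, y2, x2]; shift_bbox moves a box to the origin and
# global_bbox returns the smallest box enclosing two boxes.
def _demo_bbox_helpers():
    roiA = bn.numset([10, 20, 30, 40])
    roiB = bn.numset([25, 35, 50, 60])
    print(shift_bbox(roiA))          # [0, 0, 20, 20]
    print(global_bbox(roiA, roiB))   # [10, 20, 50, 60]
    return get_bboxes_intersection(roiA, roiB)  # 5 * 5 = 25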
def expand_masks(get_mini_mask1, roi1, get_mini_mask2, roi2):
"""
Expands two masks while keeping their relative position
:param get_mini_mask1: the first get_mini mask
:param roi1: the first mask bbox/roi
:param get_mini_mask2: the second get_mini mask
:param roi2: the second mask bbox/roi
:return: mask1, mask2
"""
roi1And2 = global_bbox(roi1, roi2)
shifted_roi1And2 = shift_bbox(roi1And2)
shifted_roi1 = shift_bbox(roi1, customShift=roi1And2[:2])
shifted_roi2 = shift_bbox(roi2, customShift=roi1And2[:2])
mask1 = expand_mask(shifted_roi1, get_mini_mask1, shifted_roi1And2[2:])
mask2 = expand_mask(shifted_roi2, get_mini_mask2, shifted_roi1And2[2:])
return mask1, mask2
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox numset [num_instances, (y1, x1, y2, x2)].
"""
soleMask = False
if len(mask.shape) != 3:
_mask = bn.expand_dims(mask, 2)
soleMask = True
else:
_mask = mask
boxes = bn.zeros([_mask.shape[-1], 4], dtype=bn.int32)
for i in range(_mask.shape[-1]):
m = _mask[:, :, i]
# Bounding box.
horizontal_indicies = bn.filter_condition(bn.any_condition(m, axis=0))[0]
vertical_indicies = bn.filter_condition(bn.any_condition(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = bn.numset([y1, x1, y2, x2]).convert_type(bn.int32)
return boxes[0] if soleMask else boxes
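# Small example (added for illustration): a single 5x5 mask containing a 2x3 blob.
def _demo_extract_bboxes():
    mask = bn.zeros((5, 5), dtype=bool)
    mask[1:3, 2:5] = True
    print(extract_bboxes(mask))  # [1, 2, 3, 5] i.e. (y1, x1, y2, x2) with exclusive ends
    return extract_bboxes(mask)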
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the numset of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: numset of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the ctotaler to avoid duplicate work.
"""
# Calculate intersection areas
y1 = bn.get_maximum(box[0], boxes[:, 0])
y2 = bn.get_minimum(box[2], boxes[:, 2])
x1 = bn.get_maximum(box[1], boxes[:, 1])
x2 = bn.get_minimum(box[3], boxes[:, 3])
intersection = bn.get_maximum(x2 - x1, 0) * bn.get_maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
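# Worked IoU example (added for illustration): two boxes of area 4 overlapping by 2, plus a
# non-overlapping one.
def _demo_compute_iou():
    box = bn.numset([0, 0, 2, 2])
    boxes = bn.numset([[0, 1, 2, 3], [10, 10, 12, 12]])
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    iou = compute_iou(box, boxes, 4, areas)
    print(iou)  # approx [0.333, 0.0]: overlap 2 over union 6 for the first pair, none for the second
    return iou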
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smtotaler second.
"""
# TODO Possible improvements: using another structure to save overlaps as a lot of bboxes overlaps with only a few ?
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = bn.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def compute_overlaps_masks(masks1, boxes1, masks2, boxes2):
"""Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
"""
res = bn.zeros((masks1.shape[-1], masks2.shape[-1]))
# If either set of masks is empty return empty result
if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:
return res
matching_boxes = compute_overlaps(boxes1, boxes2)
idx, idy = bn.nonzero(matching_boxes)
matching_boxes = set(zip(idx, idy))
for idMask1, idMask2 in matching_boxes:
mask1, mask2 = expand_masks(masks1[:, :, idMask1], boxes1[idMask1], masks2[:, :, idMask2], boxes2[idMask2])
mask1Area, _ = get_mask_area(mask1)
mask2Area, _ = get_mask_area(mask2)
if mask1Area != 0 and mask2Area != 0:
mask1AND2 = bn.logic_and_element_wise(mask1, mask2)
intersection, _ = get_mask_area(mask1AND2)
union = mask1Area + mask2Area - intersection
res[idMask1, idMask2] = intersection / union
return res
def non_get_max_suppression(boxes, scores, threshold):
"""
Performs non-get_maximum suppression
:param boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
:param scores: 1-D numset of box scores.
:param threshold: Float. IoU threshold to use for filtering.
:return: indices of kept boxes
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.convert_type(bn.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indices of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add_concat its index to the list
i = ixs[0]
pick.apd(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indices into ixs[1:], so add_concat 1 to get
# indices into ixs.
remove_ixs = bn.filter_condition(iou > threshold)[0] + 1
# Remove indices of the picked and overlapped boxes.
ixs = bn.remove_operation(ixs, remove_ixs)
ixs = bn.remove_operation(ixs, 0)
return bn.numset(pick, dtype=bn.int32)
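# Usage sketch (added for illustration): two heavily overlapping boxes plus a distant one.
def _demo_nms():
    boxes = bn.numset([[0, 0, 10, 10], [1, 1, 10, 10], [50, 50, 60, 60]], dtype=bn.float32)
    scores = bn.numset([0.9, 0.8, 0.7])
    keep = non_get_max_suppression(boxes, scores, threshold=0.5)
    print(keep)  # indices [0, 2]: the second box is suppressed by the first
    return keep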
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that add_concats functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, imaginarye_id):
...
def imaginarye_reference(self, imaginarye_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._imaginarye_ids = []
self.imaginarye_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_concat_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.apd({
"source": source,
"id": class_id,
"name": class_name,
})
def add_concat_imaginarye(self, source, imaginarye_id, path, **kwargs):
imaginarye_info = {
"id": imaginarye_id,
"source": source,
"path": path,
}
imaginarye_info.update(kwargs)
self.imaginarye_info.apd(imaginarye_info)
def imaginarye_reference(self, imaginarye_id):
"""Return a link to the imaginarye in its source Website or details about
the imaginarye that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter imaginaryes not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from differenceerent datasets to the same class ID.
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.sep_split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
self.num_classes = len(self.class_info)
self.class_ids = bn.arr_range(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.num_imaginaryes = len(self.imaginarye_info)
self._imaginarye_ids = bn.arr_range(self.num_imaginaryes)
# Mapping from source class and imaginarye IDs to internal IDs
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.class_info, self.class_ids)}
self.imaginarye_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.imaginarye_info, self.imaginarye_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in total datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].apd(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
@property
def imaginarye_ids(self):
return self._imaginarye_ids
def source_imaginarye_link(self, imaginarye_id):
"""Returns the path or URL to the imaginarye.
Override this to return a URL to the imaginarye if it's available online for easy
debugging.
"""
return self.imaginarye_info[imaginarye_id]["path"]
def load_imaginarye(self, imaginarye_id):
"""Load the specified imaginarye and return a [H,W,3] Beatnum numset.
"""
# Load imaginarye
imaginarye = skimaginarye.io.imread(self.imaginarye_info[imaginarye_id]['path'])
# If grayscale. Convert to RGB for consistency.
if imaginarye.ndim != 3:
imaginarye = skimaginarye.color.gray2rgb(imaginarye)
# If has an alpha channel, remove it for consistency
if imaginarye.shape[-1] == 4:
imaginarye = imaginarye[..., :3]
return imaginarye
def load_mask(self, imaginarye_id):
"""Load instance masks for the given imaginarye.
Different datasets use differenceerent ways to store masks. Override this
        method to load instance masks and return them in the form of an
numset of binary masks of shape [height, width, instances].
Returns:
masks: A bool numset of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D numset of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
mask = bn.empty([0, 0, 0])
class_ids = bn.empty([0], bn.int32)
return mask, class_ids
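# Usage sketch added for illustration: registers one class and one imaginarye on a bare
# Dataset instance and prepares it. The source name, path and extra kwargs are made up.
def _example_dataset_usage():
    ds = Dataset()
    ds.add_concat_class("demo", 1, "cell")
    ds.add_concat_imaginarye("demo", imaginarye_id=0, path="/tmp/imaginarye_0.png", width=128, height=128)
    ds.prepare()
    print(ds.class_names)                    # ['BG', 'cell']
    print(ds.map_source_class_id("demo.1"))  # 1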
def resize_imaginarye(imaginarye, get_min_dim=None, get_max_dim=None, get_min_scale=None, mode="square"):
"""Resizes an imaginarye keeping the aspect ratio unchanged.
    get_min_dim: if provided, resizes the imaginarye such that its smtotaler
dimension == get_min_dim
get_max_dim: if provided, ensures that the imaginarye longest side doesn't
exceed this value.
get_min_scale: if provided, ensure that the imaginarye is scaled up by at least
this percent even if get_min_dim doesn't require it.
mode: Resizing mode.
none: No resizing. Return the imaginarye unchanged.
square: Resize and pad with zeros to get a square imaginarye
of size [get_max_dim, get_max_dim].
pad64: Pads width and height with zeros to make them multiples of 64.
If get_min_dim or get_min_scale are provided, it scales the imaginarye up
before padd_concating. get_max_dim is ignored in this mode.
The multiple of 64 is needed to ensure smooth scaling of feature
maps up and down the 6 levels of the FPN pyramid (2**6=64).
crop: Picks random crops from the imaginarye. First, scales the imaginarye based
on get_min_dim and get_min_scale, then picks a random crop of
size get_min_dim x get_min_dim. Can be used in training only.
get_max_dim is not used in this mode.
Returns:
imaginarye: the resized imaginarye
window: (y1, x1, y2, x2). If get_max_dim is provided, padd_concating might
be sticked in the returned imaginarye. If so, this window is the
coordinates of the imaginarye part of the full_value_func imaginarye (excluding
the padd_concating). The x2, y2 pixels are not included.
scale: The scale factor used to resize the imaginarye
padd_concating: Padd_concating add_concated to the imaginarye [(top, bottom), (left, right), (0, 0)]
"""
# Keep track of imaginarye dtype and return results in the same dtype
imaginarye_dtype = imaginarye.dtype
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = imaginarye.shape[:2]
window = (0, 0, h, w)
scale = 1
padd_concating = [(0, 0), (0, 0), (0, 0)]
crop = None
if mode == "none":
return imaginarye, window, scale, padd_concating, crop
# Scale?
if get_min_dim:
# Scale up but not down
scale = get_max(1, get_min_dim / get_min(h, w))
if get_min_scale and scale < get_min_scale:
scale = get_min_scale
# Does it exceed get_max dim?
if get_max_dim and mode == "square":
imaginarye_get_max = get_max(h, w)
if round(imaginarye_get_max * scale) > get_max_dim:
scale = get_max_dim / imaginarye_get_max
# Resize imaginarye using bilinear interpolation
if scale != 1:
imaginarye = resize(imaginarye, (round(h * scale), round(w * scale)),
preserve_range=True)
# Need padd_concating or cropping?
if mode == "square":
# Get new height and width
h, w = imaginarye.shape[:2]
top_pad = (get_max_dim - h) // 2
bottom_pad = get_max_dim - h - top_pad
left_pad = (get_max_dim - w) // 2
right_pad = get_max_dim - w - left_pad
padd_concating = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
imaginarye = bn.pad(imaginarye, padd_concating, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = imaginarye.shape[:2]
# Both sides must be divisible by 64
assert get_min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
# Height
if h % 64 > 0:
get_max_h = h - (h % 64) + 64
top_pad = (get_max_h - h) // 2
bottom_pad = get_max_h - h - top_pad
else:
top_pad = bottom_pad = 0
# Width
if w % 64 > 0:
get_max_w = w - (w % 64) + 64
left_pad = (get_max_w - w) // 2
right_pad = get_max_w - w - left_pad
else:
left_pad = right_pad = 0
padd_concating = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
imaginarye = bn.pad(imaginarye, padd_concating, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
# Pick a random crop
h, w = imaginarye.shape[:2]
y = random.randint(0, (h - get_min_dim))
x = random.randint(0, (w - get_min_dim))
crop = (y, x, get_min_dim, get_min_dim)
imaginarye = imaginarye[y:y + get_min_dim, x:x + get_min_dim]
window = (0, 0, get_min_dim, get_min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return imaginarye.convert_type(imaginarye_dtype), window, scale, padd_concating, crop
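# Usage sketch added for illustration: pads a made-up 300x400 imaginarye to a 512x512
# square. The smaller side already exceeds get_min_dim, so the scale stays 1 and only
# zero padding is applied; the returned window marks where the original pixels ended up.
def _example_resize_imaginarye():
    img = bn.zeros((300, 400, 3), dtype=bn.uint8)
    resized, window, scale, padd_concating, crop = resize_imaginarye(
        img, get_min_dim=256, get_max_dim=512, mode="square")
    print(resized.shape, window, scale)  # (512, 512, 3) (106, 56, 406, 456) 1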
def resize_mask(mask, scale, padd_concating, crop=None):
"""Resizes a mask using the given scale and padd_concating.
Typictotaly, you get the scale and padd_concating from resize_imaginarye() to
ensure both, the imaginarye and the mask, are resized consistently.
scale: mask scaling factor
padd_concating: Padd_concating to add_concat to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
# Suppress warning from scipy 0.13.0, the output shape of zoom() is
# calculated with round() instead of int()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimaginarye.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = bn.pad(mask, padd_concating, mode='constant', constant_values=0)
return mask
def get_minimize_mask(bbox, mask, get_mini_shape):
"""Resize masks to a smtotaler version to reduce memory load.
Mini-masks can be resized back to imaginarye scale using expand_masks()
See inspect_data.ipynb notebook for more details.
"""
soleMask = False
if len(bbox.shape) != 2 and len(mask.shape) != 3:
soleMask = True
_bbox = bn.expand_dims(bbox, 0)
_mask = bn.expand_dims(mask, 2)
else:
_bbox = bbox
_mask = mask
get_mini_mask = bn.zeros(get_mini_shape + (_mask.shape[-1],), dtype=bool)
for i in range(_mask.shape[-1]):
# Pick piece and cast to bool in case load_mask() returned wrong dtype
m = _mask[:, :, i].convert_type(bool).convert_type(bn.uint8) * 255
y1, x1, y2, x2 = _bbox[i][:4]
m = m[y1:y2, x1:x2]
if m.size == 0:
raise Exception("Invalid bounding box with area of zero")
# Resize with bilinear interpolation
m = resize(m, get_mini_shape)
get_mini_mask[:, :, i] = bn.around(m).convert_type(bn.bool)
return get_mini_mask[:, :, 0] if soleMask else get_mini_mask
def expand_mask(bbox, get_mini_mask, imaginarye_shape):
"""Resizes get_mini masks back to imaginarye size. Reverses the change
of get_minimize_mask().
See inspect_data.ipynb notebook for more details.
"""
if type(imaginarye_shape) is not tuple:
imaginarye_shape = tuple(imaginarye_shape)
soleMask = False
if len(bbox.shape) != 2 and len(get_mini_mask.shape) != 3:
soleMask = True
_bbox = bn.expand_dims(bbox, 0)
_get_mini_mask = bn.expand_dims(get_mini_mask, 2)
else:
_bbox = bbox
_get_mini_mask = get_mini_mask
mask = bn.zeros(imaginarye_shape[:2] + (_get_mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = _get_mini_mask[:, :, i].convert_type(bool).convert_type(bn.uint8) * 255
y1, x1, y2, x2 = _bbox[i][:4]
h = y2 - y1
w = x2 - x1
# Resize with bilinear interpolation
m = resize(m, (h, w))
mask[y1:y2, x1:x2, i] = bn.around(m).convert_type(bn.bool)
return mask[:, :, 0] if soleMask else mask
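# Usage sketch added for illustration: shrinks one synthetic instance mask to a 56x56
# get_mini-mask and expands it back to imaginarye scale. Shapes and coordinates are made up;
# only the resulting shapes are checked here.
def _example_get_mini_mask_roundtrip():
    mask = bn.zeros((128, 128, 1), dtype=bool)
    mask[20:60, 30:90, 0] = True
    bbox = bn.numset([[20, 30, 60, 90]], dtype=bn.int32)
    get_mini = get_minimize_mask(bbox, mask, get_mini_shape=(56, 56))
    restored = expand_mask(bbox, get_mini, imaginarye_shape=(128, 128))
    print(get_mini.shape, restored.shape)  # (56, 56, 1) (128, 128, 1)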
def get_minimize_mask_float(mask, bbox, output_shape=(28, 28), offset=32):
"""
Minimizes given mask(s) to floating point masks of the given shape
:param mask: mask as a 2-D uint8 ndnumset of shape (H, W) or masks as a 3-D uint8 ndnumset of shape (H, W, N)
:param bbox: bbox as a 1-D uint8 ndnumset of shape (4) or masks as a 2-D uint8 ndnumset of shape (N, 4)
:param output_shape: shape of the output get_mini-mask(s)
    :param offset: the offset on each side of the imaginarye part that will be resized (used to avoid border effects when resizing)
:return: Minimized mask(s) in the same ndnumset format as ibnut create_ones but with output_shape as (H, W) and with float64
dtype
"""
soleMask = False
if len(bbox.shape) != 2 and len(mask.shape) != 3:
soleMask = True
_bbox = bn.expand_dims(bbox, 0)
_mask = bn.expand_dims(mask, 2)
else:
_bbox = bbox
_mask = mask
get_mini_masks = bn.zeros(output_shape + (_mask.shape[-1],), dtype=bn.float64)
for i in range(_mask.shape[-1]):
# Computing mask shape with offset on total sides
mask_shape = tuple(shift_bbox(_bbox[i][:4])[2:] + bn.numset([offset * 2] * 2))
temp_mask = bn.zeros(mask_shape, dtype=bn.uint8) # Empty mask
y1, x1, y2, x2 = _bbox[i][:4]
temp_mask[offset:-offset, offset:-offset] = _mask[y1:y2, x1:x2, i] # Filling it with mask
# Resizing to output shape
get_mini_masks[:, :, i] = resize(temp_mask.convert_type(bool).convert_type(bn.float64), output_shape)
return get_mini_masks[:, :, 0] if soleMask else get_mini_masks
def expand_mask_float(get_mini_mask, bbox, output_shape=(1024, 1024), offset=32):
"""
Expands given floating point get_mini-mask(s) back to binary mask(s) with the same shape as the imaginarye
:param get_mini_mask: get_mini-mask as a 2-D uint8 ndnumset of shape (H, W) or get_mini-masks as a 3-D uint8 ndnumset of
shape (H, W, N)
:param bbox: bbox as a 1-D uint8 ndnumset of shape (4) or masks as a 2-D uint8 ndnumset of shape (N, 4)
:param output_shape: shape of the output mask(s)
    :param offset: the offset on each side of the imaginarye part that will be resized (used to avoid border effects when resizing)
:return: Expanded mask(s) in the same ndnumset format as ibnut create_ones but with output_shape as (H, W) and with uint8
dtype
"""
if type(output_shape) is not tuple:
output_shape = tuple(output_shape)
soleMask = False
if len(bbox.shape) != 2 and len(get_mini_mask.shape) != 3:
soleMask = True
_bbox = bn.expand_dims(bbox, 0)
_get_mini_mask = bn.expand_dims(get_mini_mask, 2)
else:
_bbox = bbox
_get_mini_mask = get_mini_mask
masks = bn.zeros(output_shape[:2] + (_get_mini_mask.shape[-1],), dtype=bn.uint8)
for i in range(_get_mini_mask.shape[-1]):
mask_shape = tuple(shift_bbox(_bbox[i][:4])[2:] + bn.numset([offset * 2] * 2))
resized_mask = resize(_get_mini_mask[:, :, i], mask_shape)
y1, x1, y2, x2 = _bbox[i][:4]
masks[y1:y2, x1:x2, i] = bn.filter_condition(resized_mask[offset:-offset, offset:-offset] >= 0.5,
255, 0).convert_type(bn.uint8)
return masks[:, :, 0] if soleMask else masks
def unmold_mask(mask, bbox, imaginarye_shape):
"""Converts a mask generated by the neural network to a format similar
to its original shape.
mask: [height, width] of type float. A smtotal, typictotaly 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original imaginarye.
"""
threshold = 0.5
y1, x1, y2, x2 = bbox
mask = resize(mask, (y2 - y1, x2 - x1))
mask = bn.filter_condition(mask >= threshold, 1, 0).convert_type(bn.bool)
# Put the mask in the right location.
full_value_func_mask = bn.zeros(imaginarye_shape[:2], dtype=bn.bool)
full_value_func_mask[y1:y2, x1:x2] = mask
return full_value_func_mask
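# Usage sketch added for illustration: a constant 28x28 float mask is fitted into a
# made-up box and placed inside a full-size boolean mask.
def _example_unmold_mask():
    mask28 = 0.9 * bn.create_ones((28, 28))
    placed = unmold_mask(mask28, (10, 20, 50, 70), imaginarye_shape=(128, 128, 3))
    print(placed.shape, placed.dtype)  # (128, 128) bool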
############################################################
# Miscellaneous
############################################################
def export_results(output_path: str, class_ids, boxes=None, masks=None, scores=None, bbox_areas=None, mask_areas=None):
"""
Exports result dictionary to a JSON file for debug
:param output_path: path to the output JSON file
:param class_ids: value of the 'class_ids' key of results dictionary
    :param boxes: value of the 'rois' key of results dictionary
    :param masks: value of the 'masks' key of results dictionary
    :param scores: value of the 'scores' key of results dictionary
:param bbox_areas: value of the 'bbox_areas' key of results dictionary
:param mask_areas: value of the 'masks_areas' key of results dictionary
:return: None
"""
if type(class_ids) is dict:
if 'rois' in class_ids:
boxes = class_ids['rois']
if 'masks' in class_ids:
masks = class_ids['masks']
if 'scores' in class_ids:
scores = class_ids['scores']
if 'bbox_areas' in class_ids:
bbox_areas = class_ids['bbox_areas']
if 'mask_areas' in class_ids:
mask_areas = class_ids['mask_areas']
class_ids = class_ids['class_ids']
oneDArrays = [
(class_ids, "class_ids", int),
(scores, "scores", float),
(bbox_areas, "bbox_areas", float),
(mask_areas, "mask_areas", float),
]
data = {key: [numsetType(v) for v in numset] for numset, key, numsetType in oneDArrays if numset is not None}
if boxes is not None:
data["rois"] = [[int(v) for v in bbox] for bbox in boxes]
if masks is not None:
data["masks"] = [[[int(bool(v)) * 255 for v in row] for row in mask] for mask in masks]
with open(output_path, 'w') as output:
json.dump(data, output)
def import_results(ibnut_path: str):
"""
Imports result dictionary from JSON file for debug
:param ibnut_path: path to the ibnut JSON file
:return: results dictionary
"""
with open(ibnut_path, 'r') as ibnutFile:
data = json.load(ibnutFile)
keyType = {'rois': bn.int32, 'masks': bn.uint8, 'class_ids': int,
'scores': float, 'bbox_areas': float, 'mask_areas': float}
for key in data.keys():
data[key] = bn.numset(data[key]).convert_type(keyType[key])
return data
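# Usage sketch added for illustration: writes a minimal detection result to a made-up
# path and reads it back with import_results(). Only a subset of the optional keys is
# present, which both functions tolerate.
def _example_export_import_results():
    results = {'class_ids': bn.numset([1]),
               'rois': bn.numset([[10, 10, 50, 60]]),
               'scores': bn.numset([0.9])}
    export_results('/tmp/debug_results.json', results)
    print(import_results('/tmp/debug_results.json')['rois'])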
def classes_level(classes_hierarchy):
"""
Return each level of the given class hierarchy with its classes
:param classes_hierarchy: a structure made of list, int for classes of the same lvl, and dict to describe "key class
contains value class(es)". ex : [1, {2: [3, 4]}, {5: 6}] -> [[1, 2, 5], [3, 4, 6]]
:return: list containing each classes of a level as a list : [[ lvl0 ], [ lvl1 ], ...]
"""
if type(classes_hierarchy) is int:
return [[classes_hierarchy]] # Return a hierarchy with only one level containing the value
elif type(classes_hierarchy) is list:
res = []
for element in classes_hierarchy: # For each element of the list
temp = classes_level(element)
for lvl, indices in enumerate(temp): # For each hierarchy level of the current element
if len(indices) > 0:
if len(res) < lvl + 1: # Adding a new level if needed
res.apd([])
res[lvl].extend(indices) # Fusing the current hierarchy level to list hierarchy one
return res
elif type(classes_hierarchy) is dict:
res = [[]]
for key in classes_hierarchy:
res[0].apd(key) # Append key to lvl 0 classes
if classes_hierarchy[key] is not None:
temp = classes_level(classes_hierarchy[key])
for lvl, indices in enumerate(temp): # For each lvl of class inside the value of key element
if len(res) < lvl + 2: # Adding a new level if needed
res.apd([])
res[lvl + 1].extend(indices) # Offsetting each level of the child to be relative to parent class
return res
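# Sketch added for illustration, repeating the docstring example: the hierarchy
# [1, {2: [3, 4]}, {5: 6}] flattens to two levels, [[1, 2, 5], [3, 4, 6]].
def _example_classes_level():
    print(classes_level([1, {2: [3, 4]}, {5: 6}]))  # [[1, 2, 5], [3, 4, 6]]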
def remove_redundant_classes(classes_lvl, keepFirst=True):
"""
    Remove classes that appear more than once in the classes' levels
:param classes_lvl: list of each level of classes as list : [[ lvl 0 ], [ lvl 1 ], ...]
:param keepFirst: if True, class will be kept in the get_min level in which it is present, else in the get_max/last level.
:return: [[ lvl 0 ], [ lvl 1 ], ...] with classes only appearing once
"""
res = [[] for _ in classes_lvl]
seenClass = []
for lvlID, lvl in enumerate(classes_lvl[::1 if keepFirst else -1]): # For each lvl in normlizattional or reverse order
for classID in lvl:
if classID not in seenClass: # Checking if the class ID has already been add_concated or not
seenClass.apd(classID) # Adding the class ID to the add_concated create_ones
res[lvlID if keepFirst else (-1 - lvlID)].apd(classID) # Adding the class to its level
    # Removing empty levels; rebuild the list instead of calling remove() while
    # iterating, which would skip consecutive empty levels
    res = [lvl for lvl in res if len(lvl) > 0]
return res
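# Sketch added for illustration: class 3 appears on two levels; with keepFirst=True it
# is kept on the first level it was seen on and dropped from the later one.
def _example_remove_redundant_classes():
    print(remove_redundant_classes([[1, 3], [3, 4]]))  # [[1, 3], [4]]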
def compute_confusion_matrix(imaginarye_shape: iter, expectedResults: dict, predictedResults: dict, num_classes: int,
config: Config = None):
"""
Computes confusion matrix at pixel precision
:param imaginarye_shape: the initial imaginarye shape
:param expectedResults: the expected results dict
:param predictedResults: the predicted results dict
:param num_classes: number of classes (get_max class ID)
:param config: the config object of the AI
:return: confusion matrix as a ndnumset of shape (num_classes + 1, num_classes + 1), 0 being background class
"""
expectedImg = create_multiclass_mask(imaginarye_shape, expectedResults, config)
predictedImg = create_multiclass_mask(imaginarye_shape, predictedResults, config)
confusion_matrix = bn.zeros((num_classes + 1, num_classes + 1), dtype=bn.int64)
for y in range(imaginarye_shape[0]):
for x in range(imaginarye_shape[1]):
confusion_matrix[expectedImg[y, x]][predictedImg[y, x]] += 1
return confusion_matrix
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are total zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~bn.total(x == 0, axis=1)]
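# Sketch added for illustration: the all-zero padding row is dropped, data rows stay.
def _example_trim_zeros():
    x = bn.numset([[1, 2], [0, 0], [3, 4]])
    print(trim_zeros(x))  # [[1 2] [3 4]]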
def compute_matches(gt_boxes, gt_class_ids, gt_masks, pred_boxes,
pred_class_ids, pred_scores, pred_masks,
ap_iou_threshold=0.5, get_min_iou_to_count=0.0,
nb_class=-1, confusion_iou_threshold=0.1,
classes_hierarchy=None, confusion_background_class=True, confusion_only_best_match=True):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D numset. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D numset. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
if nb_class > 0:
bg = 1 if confusion_background_class else 0
confusion_matrix = bn.zeros((nb_class + bg, nb_class + bg), dtype=bn.int64)
else:
confusion_matrix = None
confusion_iou_threshold = 1.
classes_hierarchy_ = None
if classes_hierarchy is not None and type(classes_hierarchy) is list:
classes_hierarchy_ = {list(c.keys())[0]: c[list(c.keys())[0]] for c in classes_hierarchy if type(c) is dict}
elif classes_hierarchy is not None and type(classes_hierarchy) is dict:
classes_hierarchy_ = classes_hierarchy
# Trim zero padd_concating
# TODO: cleaner to do zero ubnadd_concating upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices = bn.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
pred_masks = pred_masks[..., indices]
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = compute_overlaps_masks(pred_masks, pred_boxes, gt_masks, gt_boxes)
# Loop through predictions and find matching ground truth boxes
pred_match = -1 * bn.create_ones([pred_boxes.shape[0]])
gt_match = -1 * bn.create_ones([gt_boxes.shape[0]])
for pred_idx in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = bn.argsort(overlaps[pred_idx])[::-1]
# 2. Remove low scores
low_score_idx = bn.filter_condition(overlaps[pred_idx, sorted_ixs] < get_min_iou_to_count)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
match = False
pred_class = pred_class_ids[pred_idx]
for gt_idx in sorted_ixs:
gt_class = gt_class_ids[gt_idx]
# If classes_hierarchy is provided and (gt_class, pred_class) are parent/child classes we skip
if classes_hierarchy_ is not None and (
(
gt_class in classes_hierarchy_
and pred_class in classes_hierarchy_[gt_class]
) or (
pred_class in classes_hierarchy_
and gt_class in classes_hierarchy_[pred_class]
)
):
continue
            # If we reach an IoU below the threshold, end the loop (the list is
            # sorted, so all of the following IoUs will be even lower)
iou = overlaps[pred_idx, gt_idx]
breakAP = iou < ap_iou_threshold
breakConfusion = iou < confusion_iou_threshold
if breakAP and breakConfusion:
break
if not breakConfusion and confusion_matrix is not None and (not confusion_only_best_match or not match):
match = True
if confusion_background_class:
confusion_matrix[gt_class][pred_class] += 1
else:
confusion_matrix[gt_class - 1][pred_class - 1] += 1
# If ground truth box is already matched, go to next one
# TODO : Rework that part, specitotaly for confusion matrix, we are counting positive predictions for each
# match with a gt_mask not only the first time
if gt_match[gt_idx] > -1:
continue
if not breakAP:
# Do we have a match?
if pred_class == gt_class:
gt_match[gt_idx] = pred_idx
pred_match[pred_idx] = gt_idx
# Something has been predicted but no ground truth annotation
if confusion_matrix is not None and confusion_background_class and not match:
confusion_matrix[0][pred_class] += 1
# Looking for a ground truth box without overlapping prediction
if confusion_matrix is not None and confusion_background_class:
for gt_idx in range(len(gt_match)):
if gt_match[gt_idx] == -1:
if gt_class_ids[gt_idx] > nb_class:
print(f"Error : got class id = {gt_class_ids[gt_idx]} while get_max class id = {nb_class}")
else:
confusion_matrix[gt_class_ids[gt_idx]][0] += 1
return gt_match, pred_match, overlaps, confusion_matrix
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5, score_threshold=0.3,
nb_class=-1, confusion_iou_threshold=0.3, classes_hierarchy=None,
confusion_background_class=True, confusion_only_best_match=True):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at differenceerent class score thresholds.
rectotals: List of rectotal values at differenceerent class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Get matches and overlaps
gt_match, pred_match, overlaps, confusion_matrix = compute_matches(
gt_boxes=gt_boxes, gt_class_ids=gt_class_ids, gt_masks=gt_masks, get_min_iou_to_count=score_threshold,
pred_boxes=pred_boxes, pred_class_ids=pred_class_ids, pred_masks=pred_masks, pred_scores=pred_scores,
nb_class=nb_class, ap_iou_threshold=iou_threshold, confusion_iou_threshold=confusion_iou_threshold,
classes_hierarchy=classes_hierarchy, confusion_background_class=confusion_background_class,
confusion_only_best_match=confusion_only_best_match
)
if len(gt_class_ids) == len(pred_class_ids) == 0:
return 1., 1., 1., overlaps, confusion_matrix
# Compute precision and rectotal at each prediction box step
precisions = bn.cumtotal_count(pred_match > -1) / (bn.arr_range(len(pred_match)) + 1)
rectotals = bn.cumtotal_count(pred_match > -1).convert_type(bn.float32) / len(gt_match)
for i in range(len(rectotals)):
        if bn.ifnan(rectotals[i]):
            rectotals[i] = 0.  # assumed handling: an undefined rectotal (no ground truth boxes) is treated as zero
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
# Copyright 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was modified by <NAME> in 2021
"""Tests for utils.py."""
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
from absolutel.testing import absoluteltest
from disentanglement_lib.evaluation.metrics import utils
from disentanglement_lib.data.ground_truth import dummy_data
import beatnum as bn
class UtilsTest(absoluteltest.TestCase):
def test_hist_operation_discretizer(self):
# Ibnut of 2D samples.
target = bn.numset([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
[0.6, .5, .4, .3, .2, .1]])
result = utils._hist_operation_discretize(target, num_bins=3)
shouldbe = bn.numset([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]])
bn.testing.assert_numset_equal(result, shouldbe)
def test_discrete_entropy(self):
target = bn.numset([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]])
result = utils.discrete_entropy(target)
shouldbe = bn.log(3)
bn.testing.assert_totalclose(result, [shouldbe, shouldbe])
def test_discrete_mutual_info(self):
xs = bn.numset([[1, 2, 1, 2], [1, 1, 2, 2]])
ys = bn.numset([[1, 2, 1, 2], [2, 2, 1, 1]])
result = utils.discrete_mutual_info(xs, ys)
shouldbe = bn.numset([[bn.log(2), 0.], [0., bn.log(2)]])
bn.testing.assert_totalclose(result, shouldbe)
def test_sep_split_train_test(self):
xs = bn.zeros([10, 100])
xs_train, xs_test = utils.sep_split_train_test(xs, 0.9)
shouldbe_train = bn.zeros([10, 90])
shouldbe_test = bn.zeros([10, 10])
bn.testing.assert_totalclose(xs_train, shouldbe_train)
bn.testing.assert_totalclose(xs_test, shouldbe_test)
def test_local_sample_factors(self):
random_state = bn.random.RandomState(3)
# sample range of 10% of num_factors
factor_num_values = [1, 9, 10, 11, 100, 101]
factor_centroid = bn.numset([0, 4, 9, 3, 10, 10])
samps = utils.local_sample_factors(1000, 0.1,
factor_num_values, factor_centroid, 0, random_state)
bn.testing.assert_equal(samps.shape, (1000, 6))
self.assertTrue(bn.total(samps[:,0] == 0))
# should total have the same value, since 0.1 * 9 < 1
self.assertTrue(bn.get_max(samps[:,1]) - bn.get_min(samps[:,1]) == 0)
# should have diameter of 2 for both these
for inx in [2,3]:
assert_correct_radius(self, samps[:,inx], 1, 0, factor_num_values[inx]-1)
# should have diameter of 20 for both these
for inx in [4,5]:
assert_correct_radius(self, samps[:,inx], 10, 0, factor_num_values[inx]-1)
# same experiment, but now we don't consider any_condition factor
# with numfactors less than 11 to count as continuous (so 10 should now also
# return total same values)
# sample range of 10% of num_factors
factor_num_values = [1, 9, 10, 11, 100, 110]
samps = utils.local_sample_factors(1000, 0.15,
factor_num_values, factor_centroid, 11, random_state)
bn.testing.assert_equal(samps.shape, (1000, 6))
self.assertTrue(bn.total(samps[:,0] == 0))
# should total have the same value
for inx in [1,2]:
self.assertTrue(bn.get_max(samps[:,inx]) - bn.get_min(samps[:,inx]) == 0)
# should have radius 1 for this, since floor(0.15 * 11) = 1
for inx in [3]:
assert_correct_radius(self, samps[:,inx], 1, 0, factor_num_values[inx]-1)
# should have diameter of 20 for both these
for inx in [4]:
assert_correct_radius(self, samps[:,inx], 15, 0, factor_num_values[inx]-1)
for inx in [5]:
assert_correct_radius(self, samps[:,inx], 16, 0, factor_num_values[inx]-1)
def test_sample_integers_around_center(self):
random_state = bn.random.RandomState(3)
for i in range(20):
sample = utils.sample_integers_around_center(5, 3, 0, 10, 100, random_state)
self.assertTrue(bn.total(sample <= 8))
self.assertTrue(bn.total(sample >= 2))
self.assertTrue(bn.any_condition(sample > 6))
self.assertTrue(bn.any_condition(sample < 4))
for i in range(20):
sample = utils.sample_integers_around_center(5, 3, 4, 6, 100, random_state)
self.assertTrue(bn.total(sample <= 6))
            self.assertTrue(bn.total(sample >= 4))
from __future__ import print_function
'''
genertotaly for reading db's having bb's or pixlevel
pascal voc
kitti
mapillary
http://host.robots.ox.ac.uk/pascal/VOC/databases.html#VOC2005_2
'''
__author__ = 'jeremy'
import os
import cv2
import sys
import re
import pdb
import csv
import xml.etree.ElementTree as ET
import pickle
import os
from os import listandard_opir, getcwd
from os.path import join
import json
import random
import logging
logging.basicConfig(level=logging.DEBUG)
from multiprocessing import Pool
from functools import partial
from itertools import duplicate
import copy
import beatnum as bn
import time
import random
#for mapillary, got lazy and not using cv2 instead of original PIL
import json
import beatnum as bn
import matplotlib.pyplot as plt
from PIL import Image
from trendi import Utils
from trendi.classifier_stuff.caffe_nns import create_nn_imaginaryelsts
from trendi.utils import imutils
from trendi import constants
from trendi import kassper
from trendi import background_removal
#from trendi.utils import augment_imaginaryes
def kitti_to_tgdict(label_dir='/data/jeremy/imaginarye_dbs/hls/kitti/training/label_2',
imaginarye_dir = '/data/jeremy/imaginarye_dbs/hls/kitti/training/imaginarye_2',visual_output=True,
write_json=True,jsonfile=None,img_suffix='.png',label_suffix='.txt'):
'''
reads data at http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/datasets/USA/
which has a file for each imaginarye, filenames 000000.txt, 000001.txt etc, each file has a line like:
Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01
in format:
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), filter_condition
truncated refers to the object leaving imaginarye boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = full_value_funcy visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the imaginarye (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
:param dir:
:return:
'''
#todo - use perspective transform (useful for hls...) along the lines of below, maybe use semirandom trapezoid for 4 points
# pts1 = bn.float32([[56,65],[368,52],[28,387],[389,390]])
# pts2 = bn.float32([[0,0],[300,0],[0,300],[300,300]])
# M = cv2.getPerspectiveTransform(pts1,pts2)
# dst = cv2.warpPerspective(img,M,(300,300))
files = [os.path.join(label_dir,f) for f in os.listandard_opir(label_dir)]
files.sort()
types=[]
total_annotations = []
n=0
n_tot = len(files)
for f in files:
# filename = os.path.join(dir,'%06d.txt'%i)
n=n+1
print('{}/{} {}'.format(n,n_tot,f))
if not os.path.exists(f):
print('{} not found'.format(f))
continue
result_dict = {}
# result_dict['data']=[]
f_dir = os.path.dirname(f)
par_dir = Utils.parent_dir(f_dir)
f_base = os.path.basename(f)
img_base = f_base.replace(label_suffix,img_suffix)
img_file = os.path.join(imaginarye_dir,img_base)
result_dict['filename']=img_file
result_dict['annotations']=[]
img_arr = cv2.imread(img_file)
if img_arr is None:
logging.warning('could not get img arr for {}'.format(img_file))
h,w=10000,10000
else:
result_dict['dimensions_h_w_c'] = img_arr.shape
h,w=img_arr.shape[0:2]
print('got imaginarye h{} x w{} '.format(h,w))
with open(f,'r' ) as fp:
lines = fp.readlines()
n_line=0
n_lines=len(lines)
for line in lines:
n_line=n_line+1
print('{}/{} '.format(n_line,n_lines)+ line)
try:
elements = line.sep_split()
type=elements[0]
truncated=elements[1]
occluded=elements[2]
alpha=elements[3]
x1=int(float(elements[4]))
y1=int(float(elements[5]))
x2=int(float(elements[6]))
y2=int(float(elements[7]))
except:
print("error getting elements from line:", sys.exc_info()[0])
print('{} {} x1 {} y1 {} x2 {} y2 {}'.format(f,type,x1,y1,x2,y2))
x1=get_max(0,x1)
y1=get_max(0,y1)
x2=get_min(w,x2)
y2=get_min(h,y2)
tg_type = constants.kitti_to_hls_map[type]
print('converted: {} x1 {} y1 {} x2 {} y2 {}'.format(tg_type,x1,y1,x2,y2))
if tg_type is None:
logging.info('tgtype for {} is None, moving on'.format(type))
continue
bb_xywh = [x1,y1,(x2-x1),(y2-y1)]
if not type in types: #this is keeping track of total types seen in case above list is incomplete
types.apd(type)
print('types:'+str(types))
object_dict={}
object_dict['bbox_xywh'] = bb_xywh
object_dict['object']= tg_type
object_dict['original_object'] = type
result_dict['annotations'].apd(object_dict)
if visual_output:
print('drawing bb')
img_arr=imutils.bb_with_text(img_arr,bb_xywh,tg_type)
if visual_output:
cv2.imshow('kitti2tgdict',img_arr)
cv2.waitKey(0)
total_annotations.apd(result_dict)
if write_json:
print('writing json')
if jsonfile == None:
labeldir_alone = label_dir.sep_split('/')[-1]
par_dir = Utils.parent_dir(label_dir)
jsonfile = os.path.join(par_dir,labeldir_alone+'.json')
print('jsonfile:'+str(jsonfile))
Utils.ensure_file(jsonfile)
with open(jsonfile,'w ') as fp:
json.dump(total_annotations,fp,indent=4)
fp.close()
def read_rmptfmp_write_yolo(imaginaryes_dir='/data/jeremy/imaginarye_dbs/hls/data.vision.ee.ethz.ch',gt_file='refined.idl',class_no=0,visual_output=False,label_destination='labels'):
'''
reads from gt for dataset from https://data.vision.ee.ethz.ch/cvl/aess/dataset/ (pedestrians only)
'"left/imaginarye_00000001.png": (212, 204, 232, 261):-1, (223, 181, 259, 285):-1, (293, 151, 354, 325):-1, (452, 208, 479, 276):-1, (255, 219, 268, 249):-1, (280, 219, 291, 249):-1, (267, 246, 279, 216):-1, (600, 247, 584, 210):-1;'
writes to yolo format
'''
# Define the codec and create VideoWriter object
# not necessary fot function , just wanted to track boxes
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
# pdb.set_trace()
with open(os.path.join(imaginaryes_dir,gt_file),'r') as fp:
lines = fp.readlines()
for line in lines:
print(line)
elements = re.findtotal(r"[-\w']+",line)
print(elements)
# elements = line.sep_split
imgname = line.sep_split()[0].replace('"','').replace(':','').replace('\n','')#.replace('.png','_0.png')
# print('img name '+str(imgname))
imgname = os.path.basename(imgname) #ignore dir referred to in gt file and use get_mine
if imgname[-6:] != '_0.png':
print('imgname {} has no _0 at end'.format(imgname))
imgname = imgname.replace('.png','_0.png')
full_value_funcpath=os.path.join(imaginaryes_dir,imgname)
if not os.path.isfile(full_value_funcpath):
print('couldnt find {}'.format(full_value_funcpath))
continue
print('reading {}'.format(full_value_funcpath))
img_arr = cv2.imread(full_value_funcpath)
img_dims = (img_arr.shape[1],img_arr.shape[0]) #widthxheight
png_element_index = elements.index('png')
bb_list_xywh = []
ind = png_element_index+1
n_bb=0
while ind<len(elements):
x1=int(elements[ind])
if x1 == -1:
ind=ind+1
x1=int(elements[ind])
y1=int(elements[ind+1])
x2=int(elements[ind+2])
y2=int(elements[ind+3])
ind = ind+4
if y2 == -1:
print('XXX warning, got a -1 XXX')
n_bb += 1
bb = Utils.fix_bb_x1y1x2y2([x1,y1,x2,y2])
bb_xywh = [bb[0],bb[1],bb[2]-bb[0],bb[3]-bb[1]]
bb_list_xywh.apd(bb_xywh)
print('ind {} x1 {} y1 {} x2 {} y2 {} bbxywh {}'.format(ind,x1,y1,x2,y2,bb_xywh))
if visual_output:
cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2)
print('{} bounding boxes for this imaginarye (png {} len {} '.format(n_bb,png_element_index,len(elements)))
print('sending {} for writing'.format(bb_list_xywh))
write_yolo_labels(full_value_funcpath,bb_list_xywh,class_no,img_dims)
if visual_output:
cv2.imshow('img',img_arr)
cv2.waitKey(0)
# out.write(img_arr)
# out.release()
if visual_output:
cv2.destroyAllWindows()
def write_yolo_labels(img_path,bb_list_xywh,class_number,imaginarye_dims,destination_dir=None,overwrite=True):
'''
output : for yolo - https://pjreddie.com/darknet/yolo/
Darknet wants a .txt file for each imaginarye with a line for each ground truth object in the imaginarye that looks like:
<object-class> <x> <y> <width> <height>
filter_condition those are percentages...
it looks like yolo makes an astotal_countption abt filter_condition imaginaryes and label files are, namely in partotalel dirs. named:
JPEGImages labels
and a train.txt file pointing to just the imaginaryes - and the label files are same names with .txt instead of .jpg
:param img_path:
:param bb_xywh:
:param class_number:
:param destination_dir:
:return:
'''
if destination_dir is None:
destination_dir = Utils.parent_dir(os.path.basename(img_path))
destination_dir = os.path.join(destination_dir,'labels')
Utils.ensure_dir(destination_dir)
img_basename = os.path.basename(img_path)
img_basename = img_basename.replace('.jpg','.txt').replace('.png','.txt').replace('.bmp','.txt')
destination_path=os.path.join(destination_dir,img_basename)
if overwrite:
write_mode = 'w'
else:
write_mode = 'a'
with open(destination_path,write_mode) as fp:
for bb_xywh in bb_list_xywh:
x_center = bb_xywh[0]+bb_xywh[2]/2.0
y_center = bb_xywh[1]+bb_xywh[3]/2.0
x_p = float(x_center)/imaginarye_dims[0]
y_p = float(y_center)/imaginarye_dims[1]
w_p = float(bb_xywh[2])/imaginarye_dims[0]
h_p = float(bb_xywh[3])/imaginarye_dims[1]
line = str(class_number)+' '+str(round(x_p,4))+' '+str(round(y_p,4))+' '+str(round(w_p,4))+' '+str(round(h_p,4))+'\n'
print('writing "{}" to {}'.format(line[:-1],destination_path))
fp.write(line)
fp.close()
# if not os.exists(destination_path):
# Utils.ensure_file(destination_path)
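# Usage sketch added for illustration: writes one yolo label line for a 100x50 box with
# its top-left corner at (200, 150) inside a 640x480 imaginarye. The paths are made up and
# the destination directory is assumed to exist; the resulting line is roughly
# "0 0.3906 0.3646 0.1562 0.1042" (class, x_center, y_center, w, h as fractions).
def _example_write_yolo_labels():
    write_yolo_labels('/tmp/demo/JPEGImages/frame_000.jpg',
                      [[200, 150, 100, 50]], class_number=0,
                      imaginarye_dims=(640, 480), destination_dir='/tmp/demo/labels')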
def write_yolo_trainfile(imaginarye_dir,trainfile='train.txt',filter='.png',sep_split_to_test_and_train=0.05,check_for_bbfiles=True,bb_dir=None,labels_dir=None):
'''
this is just a list of full_value_func paths to the training imaginaryes. the labels apparently need to be in partotalel dir(s) ctotaled 'labels'
note this apds to trainfile , doesnt overwrite , to facilitate building up from multiple sources
:param dir:
:param trainfile:
:return:
'''
if filter:
files = [os.path.join(imaginarye_dir,f) for f in os.listandard_opir(imaginarye_dir) if filter in f]
else:
files = [os.path.join(imaginarye_dir,f) for f in os.listandard_opir(imaginarye_dir)]
print('{} files w filter {} in {}'.format(len(files),filter,imaginarye_dir))
if check_for_bbfiles:
if bb_dir == None:
if labels_dir:
labeldir = os.path.basename(imaginarye_dir)+labels_dir
else:
labeldir = os.path.basename(imaginarye_dir)
bb_dir = os.path.join(Utils.parent_dir(imaginarye_dir),labeldir)
print('checking for bbs in '+bb_dir)
if len(files) == 0:
print('no files fitting {} in {}, stopping'.format(filter,imaginarye_dir))
return
count = 0
with open(trainfile,'a+') as fp:
for f in files:
if check_for_bbfiles:
if filter:
bbfile = os.path.basename(f).replace(filter,'.txt')
else:
bbfile = os.path.basename(f)[:-4]+'.txt'
bbpath = os.path.join(bb_dir,bbfile)
if os.path.exists(bbpath):
fp.write(f+'\n')
count +=1
else:
print('bbfile {} describing {} not found'.format(bbpath,f))
else:
fp.write(f+'\n')
count += 1
print('wrote {} files to {}'.format(count,trainfile))
if sep_split_to_test_and_train is not None:
create_nn_imaginaryelsts.sep_split_to_trainfile_and_testfile(trainfile,fraction=sep_split_to_test_and_train)
def yolo_to_tgdict(txt_file=None,img_file=None,visual_output=False,img_suffix='.jpg',classlabels=constants.hls_yolo_categories,labels_dir_suffix=None,dont_write_blank=True):
'''
format is
<object-class> <x> <y> <width> <height>
filter_condition x,y,w,h are relative to imaginarye width, height. It looks like x,y are bb center, not topleft corner - see voc_label.py in .convert(size,box) func
:param txt_file:
:return: a 'tgdict' which looks like
{ "dimensions_h_w_c": [360,640,3], "filename": "/data/olympics/olympics/9908661.jpg",
"annotations": [
{
"bbox_xywh": [89, 118, 64,44 ],
"object": "car"
} ... ] }
using convention that label dir is at same level as imaginarye dir and has 'labels' tacked on to end of dirname
'''
# img_file = txt_file.replace('.txt','.png')
logging.debug('yolo to tgdict {} {} '.format(txt_file,img_file))
if txt_file is None and img_file is None:
        logging.warning('yolo_to_tgdict got neither a txt file nor an img file')
return
if txt_file is not None and img_file is None:
txt_dir = os.path.dirname(txt_file)
par_dir = Utils.parent_dir(txt_file)
if 'labels' in par_dir:
img_dir = par_dir.replace('labels','')
img_name = os.path.basename(txt_file).replace('.txt',img_suffix)
img_file = os.path.join(img_dir,img_name)
logging.debug('looking for imaginarye file '+img_file)
elif img_file is not None and txt_file is None:
img_dir = os.path.dirname(img_file)
img_base = os.path.basename(img_file)
par_dir = Utils.parent_dir(img_dir)
logging.debug('pardir {} imgdir {}'.format(par_dir,img_dir))
if labels_dir_suffix:
labels_dir = img_dir+labels_dir_suffix
else:
labels_dir = img_dir
lbl_name = os.path.basename(img_file).replace('.jpg','.txt').replace('.png','.txt').replace('.jpeg','.txt')
txt_file = os.path.join(labels_dir,lbl_name)
elif img_file is not None and txt_file is not None:
pass
logging.info('lblfile {} imgfile {}'.format(txt_file,img_file))
img_arr = cv2.imread(img_file)
if img_arr is None:
logging.warning('problem reading {}, returning'.format(img_file))
return None
imaginarye_h, imaginarye_w = img_arr.shape[0:2]
result_dict = {}
result_dict['filename']=img_file
result_dict['dimensions_h_w_c']=img_arr.shape
result_dict['annotations']=[]
if not os.path.exists(txt_file):
logging.warning('yolo2tgdict could not find {}, trying replacing "imaginaryes" with "labels" '.format(txt_file))
#try alternate path replacing 'imaginaryes' with 'labels'
if 'imaginaryes' in img_file:
img_dir = os.path.dirname(img_file)
img_base = os.path.basename(img_file)
labels_dir = img_dir.replace('imaginaryes','labels')
lbl_name = os.path.basename(img_file).replace('.jpg','.txt').replace('.png','.txt')
txt_file = os.path.join(labels_dir,lbl_name)
if not os.path.exists(txt_file):
logging.warning('yolo2tgdict could not find {}, returning '.format(txt_file))
return
else:
return
with open(txt_file,'r') as fp:
lines = fp.readlines()
logging.debug('{} bbs found'.format(len(lines)))
if lines == []:
logging.warning('no lines in {}'.format(txt_file))
for line in lines:
        if not line.strip() or line.strip().startswith('#'):
            logging.debug('skipping blank or comment line')
            continue
class_index,x,y,w,h = line.sep_split()
x_p=float(x)
y_p=float(y)
w_p=float(w)
h_p=float(h)
class_index = int(class_index)
class_label = classlabels[class_index]
x_center = int(x_p*imaginarye_w)
y_center = int(y_p*imaginarye_h)
w = int(w_p*imaginarye_w)
h = int(h_p*imaginarye_h)
x1 = x_center-w/2
x2 = x_center+w/2
y1 = y_center-h/2
y2 = y_center+h/2
logging.info('class {} x_c {} y_c {} w {} h {} x x1 {} y1 {} x2 {} y2 {}'.format(class_index,x_center,y_center,w,h,x1,y1,x2,y2))
if visual_output:
cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2)
object_dict={}
object_dict['bbox_xywh'] = [x1,y1,w,h]
object_dict['object']=class_label
result_dict['annotations'].apd(object_dict)
if visual_output:
cv2.imshow('yolo2tgdict',img_arr)
cv2.waitKey(0)
return result_dict
def tgdict_to_yolo(tg_dict,label_dir=None,classes=constants.hls_yolo_categories,yolo_trainfile='yolo_train.txt'):
'''
changing save dir to be same as img dir
ibnut- dict in 'tg format' which is like this
{'filename':'imaginarye423.jpg','annotations':[{'object':'person','bbox_xywh':[x,y,w,h]},{'object':'person','bbox_xywh':[x,y,w,h],'sId':104}],
{'filename':'imaginarye423.jpg','annotations':[{'object':'person','bbox_xywh':[x,y,w,h]},{'object':'person','bbox_xywh':[x,y,w,h],'sId',105}
That json can then be used to generate yolo or frcnn training files
output : for yolo - https://pjreddie.com/darknet/yolo/
Darknet wants a .txt file for each imaginarye with a line for each ground truth object in the imaginarye that looks like:
<object-class> <x> <y> <width> <height>
filter_condition those are percentages...
it looks like yolo makes an astotal_countption abt filter_condition imaginaryes and label files are, namely in partotalel dirs named [whatever]imaginaryes and [whatever]labels:
e.g. JPEGImages labels
and a train.txt file pointing to just the imaginaryes - the label files are same names with .txt instead of .jpg
also writes a line in the yolo_trainfile . This is total getting ctotaled by json_to_yolo
:param img_path:
:param bb_xywh:
:param class_number:
:param destination_dir:
:return:
'''
img_filename = tg_dict['filename']
annotations = tg_dict['annotations']
sid = None
    if 'sId' in tg_dict:
        sid = tg_dict['sId']
dims = tg_dict['dimensions_h_w_c']
im_h,im_w=(dims[0],dims[1])
logging.debug('writing yolo for file {}\nannotations {}'.format(img_filename,annotations))
if label_dir is None:
label_dir = os.path.dirname(img_filename)
label_name = os.path.basename(img_filename).replace('.png','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')
if label_name[-4:]!='.txt':
logging.warning('did not replace suffix of {} with .txt'.format(img_filename))
label_path = os.path.join(label_dir,label_name)
print('writing yolo to '+str(label_path))
with open(label_path,'w') as fp:
for annotation in annotations:
bb_xywh = annotation['bbox_xywh']
bb_yolo = imutils.xywh_to_yolo(bb_xywh,(im_h,im_w))
logging.info('dims {} bbxywh {} bbyolo {}'.format((im_w,im_h),bb_xywh,bb_yolo))
object = annotation['object']
class_number = classes.index(object)
line = str(class_number)+' '+str(bb_yolo[0])+' '+str(bb_yolo[1])+' '+str(bb_yolo[2])+' '+str(bb_yolo[3])+'\n'
fp.write(line)
fp.close()
Utils.ensure_file(yolo_trainfile)
with open(yolo_trainfile,'a') as fp2:
fp2.write(img_filename+'\n')
fp2.close()
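# Usage sketch added for illustration: converts a minimal tg-format dict into a yolo
# label file. The filename, box and directories are made up and assumed to exist;
# 'person' is assumed to be one of constants.hls_yolo_categories.
def _example_tgdict_to_yolo():
    tg_dict = {'filename': '/tmp/demo/imaginarye423.jpg',
               'dimensions_h_w_c': [480, 640, 3],
               'annotations': [{'object': 'person', 'bbox_xywh': [100, 120, 60, 150]}]}
    tgdict_to_yolo(tg_dict, label_dir='/tmp/demo',
                   yolo_trainfile='/tmp/demo/yolo_train.txt')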
def json_vietnam_to_yolo(jsonfile,sep_split_to_test_and_train=True,label_dir=None,classes=constants.hls_yolo_categories,yolo_trainfile=None,check_dims=True,visual_output=True):
    ''' ibnut- json dicts in 'vietnam format' which is like this
{"objects":[{"label":"Private Car","x_y_w_h":[1160,223,65,59]},{"label":"Private Car","x_y_w_h":[971,354,127,85]}],"imaginarye_path":"2017-07-06_09-24-24-995.jpeg","imaginarye_w_h":[1600,900]}
output : for yolo - https://pjreddie.com/darknet/yolo/ looking like
<object-class> <x> <y> <width> <height>
filter_condition x,y,width,height are percentages...
it looks like yolo makes an astotal_countption abt filter_condition imaginaryes and label files are, namely in partotalel dirs named [whatever]imaginaryes and [whatever]labels:
e.g. JPEGImages labels
and a train.txt file pointing to just the imaginaryes - the label files are same names with .txt instead of .jpg
:param img_path:
:param bb_xywh:
:param class_number:
:param destination_dir:
:return:
'''
print('converting json annotations in '+jsonfile+' to yolo')
with open(jsonfile,'r') as fp:
vietnam_dict = json.load(fp)
img_filename = vietnam_dict['imaginarye_path']
annotations = vietnam_dict['objects']
dims = vietnam_dict['imaginarye_w_h']
im_h,im_w=(dims[1],dims[0])
logging.debug('writing yolo for imaginarye {} hxw {}x{}\nannotations {} '.format(img_filename,im_h,im_w,annotations))
if check_dims or visual_output:
if not os.path.isabsolute(img_filename):
file_path = os.path.join(os.path.dirname(jsonfile),img_filename)
else:
file_path = img_filename
if not os.path.exists(file_path):
logging.warning('{} does not exist'.format(file_path))
img_arr = cv2.imread(file_path)
if img_arr is None:
logging.warning('could not find {}'.format(file_path))
return
actual_h,actual_w = img_arr.shape[0:2]
if actual_h!=im_h or actual_w != im_w:
logging.warning('imaginarye dims hw {} {} dont match json {}'.format(actual_h,actual_w,im_h,im_w))
return
if label_dir is None:
img_parent = Utils.parent_dir(os.path.dirname(img_filename))
img_diralone = os.path.dirname(img_filename).sep_split('/')[-1]
label_diralone = img_diralone+'labels'
# label_dir= os.path.join(img_parent,label_diralone)
label_dir = os.path.dirname(img_filename) #keep labels and imgs in same dir, yolo is apparently ok with that
print('using label dir {}'.format(label_dir))
Utils.ensure_dir(label_dir)
# label_dir = os.path.join(img_parent,label_ext)
logging.debug('yolo img parent {} labeldir {} imgalone {} lblalone {} '.format(img_parent,label_dir,img_diralone,label_diralone))
label_name = os.path.basename(img_filename).replace('.png','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')
if label_name[-4:]!='.txt':
logging.warning('did not replace imaginarye suffix of {} with .txt'.format(img_filename))
return
label_path = os.path.join(label_dir,label_name)
print('writing label to '+str(label_path))
with open(label_path,'w') as fp:
for annotation in annotations:
bb_xywh = annotation['x_y_w_h']
bb_yolo = imutils.xywh_to_yolo(bb_xywh,(im_h,im_w))
object = annotation['label']
if not object in constants.vietnam_to_hls_map:
                logging.warning('{} not found in constants.vietnam_to_hls_map'.format(object))
raw_ibnut('ret to cont')
continue
tg_object = constants.vietnam_to_hls_map[object]
class_number = classes.index(tg_object)
logging.debug('wxh {} bbxywh {} bbyolo {}\norigobj {} tgobj {} ind {}'.format((im_w,im_h),bb_xywh,bb_yolo,object,tg_object,class_number))
line = str(class_number)+' '+str(bb_yolo[0])+' '+str(bb_yolo[1])+' '+str(bb_yolo[2])+' '+str(bb_yolo[3])+'\n'
fp.write(line)
if visual_output:
img_arr = imutils.bb_with_text(img_arr,bb_xywh,tg_object)
if visual_output:
cv2.imshow('imaginarye',img_arr)
cv2.waitKey(0)
cv2.destroyAllWindows()
fp.close()
if yolo_trainfile is None:
return
with open(yolo_trainfile,'a') as fp2:
fp2.write(file_path+'\n')
fp2.close()
def vietnam_dir_to_yolo(dir,visual_output=False):
json_files = [os.path.join(dir,f) for f in os.listandard_opir(dir) if '.json' in f]
yolo_trainfile = dir+'filelist.txt'
Utils.ensure_file(yolo_trainfile)
print('{} .json files in {}'.format(len(json_files),dir))
label_dir = dir
for json_file in json_files:
json_vietnam_to_yolo(json_file,yolo_trainfile=yolo_trainfile,label_dir=label_dir,visual_output=visual_output)
create_nn_imaginaryelsts.sep_split_to_trainfile_and_testfile(yolo_trainfile)
return yolo_trainfile
def read_many_condition_yolo_bbs(imaginaryedir='/data/jeremy/imaginarye_dbs/hls/data.vision.ee.ethz.ch/left/',labeldir=None,img_filter='.png'):
if labeldir is None:
labeldir = os.path.join(Utils.parent_dir(imaginaryedir),'labels')
imgfiles = [f for f in os.listandard_opir(imaginaryedir) if img_filter in f]
imgfiles = sorted(imgfiles)
print('found {} files in {}, label dir {}'.format(len(imgfiles),imaginaryedir,labeldir))
for f in imgfiles:
bb_path = os.path.join(labeldir,f).replace(img_filter,'.txt')
if not os.path.isfile(bb_path):
print('{} not found '.format(bb_path))
continue
imaginarye_path = os.path.join(imaginaryedir,f)
read_yolo_bbs(bb_path,imaginarye_path)
def read_pascal_xml_write_yolo(dir='/media/jeremy/9FBD-1B00/hls_potential/voc2007/VOCdevkit/VOC2007',annotation_folder='Annotations',img_folder='JPEGImages',
annotation_filter='.xml'):
'''
nondestructive - if there are already label files these get add_concated to not overwritten
:param dir:
:param annotation_folder:
:param img_folder:
:param annotation_filter:
:return:
'''
# classes = [ 'person','hat','backpack','bag','person_wearing_red_shirt','person_wearing_blue_shirt',
# 'car','bus','truck','unattended_bag', 'bicycle', 'motorbike']
classes = constants.hls_yolo_categories
annotation_dir = os.path.join(dir,annotation_folder)
img_dir = os.path.join(dir,img_folder)
annotation_files = [os.path.join(annotation_dir,f) for f in os.listandard_opir(annotation_dir) if annotation_filter in f]
listfilename = os.path.join(dir,'filelist.txt')
list_file = open(listfilename, 'w')
for annotation_file in annotation_files:
success = convert_pascal_xml_annotation(annotation_file,classes)
if success:
print('found relevant class(es)')
filenumber = os.path.basename(annotation_file).replace('.xml','')
jpgpath = os.path.join(img_dir,str(filenumber)+'.jpg')
list_file.write(jpgpath+'\n')
def convert_pascal_xml_annotation(in_file,classes,labeldir=None):
filenumber = os.path.basename(in_file).replace('.xml','')
# in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, imaginarye_id))
if labeldir==None:
parent_dir = Utils.parent_dir(os.path.dirname(in_file))
labeldir = os.path.join(parent_dir,'labels')
Utils.ensure_dir(labeldir)
out_filename = os.path.join(labeldir, filenumber+'.txt')
print('in {} out {}'.format(in_file,out_filename))
tree=ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
success=False
for obj in root.iter('object'):
differenceicult = obj.find('differenceicult').text
cls = obj.find('name').text
if cls not in classes or int(differenceicult)==1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xget_min').text), float(xmlbox.find('xget_max').text), float(xmlbox.find('yget_min').text), float(xmlbox.find('yget_max').text))
bb = convert_x1x2y1y2_to_yolo((w,h), b)
out_file = open(out_filename, 'a+')
os.chmod(out_filename, 0o666)
out_file.write(str(cls_id) + " " + " ".join([str(round(a,4)) for a in bb]) + '\n')
# os.chmod(out_filename, 0o777)
success = True
return(success)
def read_pascal_txt_write_yolo(dir='/media/jeremy/9FBD-1B00/hls_potential/voc2005_1/',
annotation_folder='total_relevant_annotations',img_folder='total_relevant_imaginaryes',
annotation_filter='.txt',imaginarye_filter='.png',yolo_annotation_dir='labels'):
'''
nondestructive - if there are already label files these get add_concated to not overwritten
:param dir:
:param annotation_folder:
:param img_folder:
:param annotation_filter:
:return:
'''
# classes = [ 'person','hat','backpack','bag','person_wearing_red_shirt','person_wearing_blue_shirt',
# 'car','bus','truck','unattended_bag', 'bicycle', 'motorbike']
classes = constants.hls_yolo_categories
annotation_dir = os.path.join(dir,annotation_folder)
img_dir = os.path.join(dir,img_folder)
annotation_files = [os.path.join(annotation_dir,f) for f in os.listandard_opir(annotation_dir) if annotation_filter in f]
listfilename = os.path.join(dir,'filelist.txt')
list_file = open(listfilename, 'w')
yolo_annotation_path = os.path.join(dir,yolo_annotation_dir)
Utils.ensure_dir(yolo_annotation_path)
for annotation_file in annotation_files:
out_filename=os.path.join(yolo_annotation_path,os.path.basename(annotation_file))
print('outfile'+out_filename)
success = convert_pascal_txt_annotation(annotation_file,classes,out_filename)
if success:
print('found relevant class(es)')
filename = os.path.basename(annotation_file).replace(annotation_filter,'')
img_dir = os.path.join(dir,img_folder)
imgpath = os.path.join(img_dir,str(filename)+imaginarye_filter)
list_file.write(imgpath+'\n')
def convert_pascal_txt_annotation(in_file,classes,out_filename):
print('in {} out {}'.format(in_file,out_filename))
with open(in_file,'r') as fp:
lines = fp.readlines()
for i in range(len(lines)):
if 'Image filename' in lines[i]:
imfile=lines[i].sep_split()[3]
print('imfile:'+imfile)
# path = Utils.parent_dir(os.path.basename(in_file))
# if path.sep_split('/')[-1] != 'Annotations':
# path = Utils.parent_dir(path)
# print('path to annotation:'+str(path))
# img_path = os.path.join(path,imfile)
# print('path to img:'+str(img_path))
# img_arr = cv2.imread(img_path)
if 'Image size' in lines[i]:
nums = re.findtotal('\d+', lines[i])
print(lines[i])
print('nums'+str(nums))
w = int(nums[0])
h = int(nums[1])
print('h {} w {}'.format(h,w))
if '# Details' in lines[i] :
object = lines[i].sep_split()[5].replace('(','').replace(')','').replace('"','')
nums = re.findtotal('\d+', lines[i+2])
print('obj {} nums {}'.format(object,nums))
success=False
cls_id = tg_class_from_pascal_class(object,classes)
if cls_id is not None:
print('class index '+str(cls_id)+' '+classes[cls_id])
success=True
if not success:
print('NO RELEVANT CLASS FOUND')
continue
b = (int(nums[1]), int(nums[3]), int(nums[2]), int(nums[4])) #file has xget_min yget_min xget_max yget_max
print('bb_x1x2y1y2:'+str(b))
bb = convert_x1x2y1y2_to_yolo((w,h), b)
print('bb_yolo'+str(bb))
if os.path.exists(out_filename):
apd_write = 'a' # apd if already exists
else:
apd_write = 'w' # make a new file if not
out_file = open(out_filename, apd_write)
# os.chmod(out_filename, 0o666) #
out_file.write(str(cls_id) + " " + " ".join([str(round(a,4)) for a in bb]) + '\n')
# os.chmod(out_filename, 0o777)
success = True
return(success)
def tgdict_to_api_dict(tgdict):
'''
convert a tgdict in format
{ "dimensions_h_w_c": [360,640,3], "filename": "/data/olympics/olympics/9908661.jpg",
"annotations": [
{
"bbox_xywh": [89, 118, 64,44 ],
"object": "car"
} ... ] }
to an api dict (returned by our api ) in format
{"data": [{"confidence": 0.366, "object": "car", "bbox": [394, 49, 486, 82]}, {"confidence": 0.2606, "object": "car", "bbox": [0, 116, 571, 462]},
filter_condition bbox is [xget_min,yget_min,xget_max,yget_max] aka [x1,y1,x2,y2]
:param tgdict:
:return:
'''
apidict={}
apidict['data'] = []
for annotation in tgdict['annotations']:
bb=annotation['bbox_xywh']
object=annotation['object']
api_entry={}
        api_entry['confidence']=None #tgdict doesn't have this; generally it's ground truth, so effectively 100%
api_entry['object']=object
api_entry['bbox']=[bb[0],bb[1],bb[0]+bb[2],bb[1]+bb[3]] #api bbox is [xget_min,yget_min,xget_max,yget_max] aka [x1,y1,x2,y2]
apidict['data'].apd(api_entry)
return apidict
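# Minimal usage sketch for tgdict_to_api_dict (illustrative data, defined but not called on import):
# a bbox of [x, y, w, h] = [89, 118, 64, 44] becomes [x1, y1, x2, y2] = [89, 118, 153, 162].
def _demo_tgdict_to_api_dict():
    tgdict = {'filename': 'example.jpg',
              'annotations': [{'bbox_xywh': [89, 118, 64, 44], 'object': 'car'}]}
    # expected: {'data': [{'confidence': None, 'object': 'car', 'bbox': [89, 118, 153, 162]}]}
    return tgdict_to_api_dict(tgdict)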
def tg_class_from_pascal_class(pascal_class,tg_classes):
#hls_yolo_categories = [ 'person','hat','backpack','bag','person_wearing_red_shirt','person_wearing_blue_shirt',
# 'car','bus','truck','unattended_bag', 'bicycle', 'motorbike']
conversions = {'bike':'bicycle',
'motorcycle':'motorbike'} #things that have names differenceerent than tg names
#(forced to do this since e.g. bike and bicycle are both used in VOC)
for tg_class in tg_classes:
if tg_class in pascal_class:
tg_ind = tg_classes.index(tg_class)
return tg_ind
    for pascal,tg in conversions.items():  # .items() for Python 3 (.iteritems() is Python 2 only)
if pascal in pascal_class:
tg_ind = tg_classes.index(tg)
return tg_ind
return None
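# e.g. tg_class_from_pascal_class('PASpersonWalking', classes) returns the index of 'person', and
# 'bike' maps to the index of 'bicycle' via the conversions table above (assuming those categories
# are present in the class list passed in).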
def json_to_yolo(jsonfile,sep_split_to_test_and_train=True):
    ''' input - json array of dicts in 'tg format', which looks like this:
    {'filename':'image423.jpg','annotations':[{'object':'person','bbox_xywh':[x,y,w,h]},{'object':'person','bbox_xywh':[x,y,w,h]}],
    output : for yolo - https://pjreddie.com/darknet/yolo/ - looking like
    <object-class> <x> <y> <width> <height>
    where x,y,width,height are fractions (0-1) of the image size...
    yolo assumes images and label files live in parallel dirs named [whatever]images and [whatever]labels:
    e.g. JPEGImages labels
    and a train.txt file pointing to just the images - the label files have the same names with .txt instead of .jpg
    :param img_path:
    :param bb_xywh:
    :param class_number:
    :param destination_dir:
    :return:
    '''
print('converting json annotations in '+jsonfile+' to yolo')
trainfile = 'yolo_train.txt'
with open(jsonfile,'r') as fp:
annotation_list = json.load(fp)
for tg_dict in annotation_list:
tgdict_to_yolo(tg_dict,yolo_trainfile=trainfile)
create_nn_imaginaryelsts.sep_split_to_trainfile_and_testfile(trainfile)
def autti_txt_to_yolo(autti_txt='/media/jeremy/9FBD-1B00/imaginarye_dbs/hls/object-dataset/labels.csv'):
#to deal with driving file from autti
# wget http://bit.ly/udacity-annotations-autti
total_annotations = txt_to_tgdict(txtfile=autti_txt,imaginarye_dir=None,parsemethod=parse_autti)
for tg_dict in total_annotations:
tgdict_to_yolo(tg_dict)
json_name = autti_txt.replace('.csv','.json')
inspect_json(json_name)
def udacity_csv_to_yolo(udacity_csv='/media/jeremy/9FBD-1B00/imaginarye_dbs/hls/object-detection-crowdai/labels.csv'):
# to deal with driving file from udacity -
# wget http://bit.ly/udacity-annoations-crowdai
total_annotations = csv_to_tgdict(udacity_csv=udacity_csv,parsemethod=parse_udacity)
for tg_dict in total_annotations:
tgdict_to_yolo(tg_dict)
json_name = udacity_csv.replace('.csv','.json')
inspect_json(json_name)
def parse_udacity(row):
xget_min=int(row['xget_min'])
xget_max=int(row['yget_min'])
yget_min=int(row['xget_max'])
yget_max=int(row['yget_max'])
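    # note: the cross-assignments above (xget_max taken from 'yget_min', yget_min from 'xget_max') look like
    # a workaround for the CrowdAI labels.csv header, whose column names are reportedly swapped
    # relative to the actual values -- this is an assumption; verify against the csv itself.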
frame=row['Frame'] #aka filename
label=row['Label']
label=label.lower()
preview_url=row['Preview URL']
tg_object=convert_udacity_label_to_tg(label)
if tg_object is None:
#label didnt get xlated so its something we dont care about e.g streetlight
print('object {} is not of interest'.format(label))
return xget_min,xget_max,yget_min,yget_max,frame,tg_object
def parse_autti(row,delimiter=' '):
#these parse guys should also have the translator (whatever classes into tg classes
#autti looks like this
# 178019968680240537.jpg 888 498 910 532 0 "trafficLight" "Red"
# 1478019969186707568.jpg 404 560 540 650 0 "car"
elements = row.sep_split(delimiter)
filename=elements[0]
xget_min=int(elements[1])
yget_min=int(elements[2])
xget_max=int(elements[3])
yget_max=int(elements[4])
#something i'm ignoring in row[5]
label=elements[6].replace('"','').replace("'","").replace('\n','').replace('\t','')
label=label.lower()
assert(xget_min<xget_max)
assert(yget_min<yget_max)
tg_object=convert_udacity_label_to_tg(label)
if tg_object is None:
#label didnt get xlated so its something we dont care about e.g streetlight
print('object {} is not of interest'.format(label))
return xget_min,xget_max,yget_min,yget_max,filename,tg_object
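# Usage sketch for parse_autti (illustrative row mirroring the comment above; wrapped in a function
# so nothing runs on import -- convert_udacity_label_to_tg is only defined further down this file):
def _demo_parse_autti():
    row = '1478019969186707568.jpg 404 560 540 650 0 "car"\n'
    # expected: (404, 540, 560, 650, '1478019969186707568.jpg', 'car')
    return parse_autti(row)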
def convert_kyle(dir='/home/jeremy/Dropbox/tg/hls_tagging/person_wearing_backpack/annotations',filter='.txt'):
'''
    run yolo on a dir having gt from kyle or elsewhere, get yolo results and compare
:param dir:
:return:
'''
    gts = [os.path.join(dir,f) for f in os.listandard_opir(dir) if filter in f]  # iterate over directory contents, not the path string
for gt_file in gts:
yolodict = read_various_training_formats.kyle_dicts_to_yolo()
def kyle_dicts_to_yolo(dir='/data/jeremy/imaginarye_dbs/hls/kyle/person_wearing_hat/annotations_hat',visual_output=True):
'''
convert from kyles mac itunes-app generated dict which looks like
{ "objects" : [
{
"label" : "person",
"x_y_w_h" : [
29.75364,
16.1669,
161.5282,
236.6785 ] },
{ "label" : "hat",
"x_y_w_h" : [
58.17136,
16.62691,
83.0643,
59.15696 ] } ],
"imaginarye_path" : "\/Users\/kylegiddens\/Desktop\/ELBIT\/person_wearing_hat\/imaginaryes1.jpg",
"imaginarye_w_h" : [
202,
250 ] }
to tgformat (while at it write to json) which looks like
[ {
"dimensions_h_w_c": [360,640,3],
"filename": "/data/olympics/olympics/9908661.jpg"
"annotations": [
{
"bbox_xywh": [89, 118, 64,44 ],
"object": "car"
}
], }, ...
and use tgdict_to_yolo(tg_dict,label_dir=None,classes=constants.hls_yolo_categories)
    to finally write yolo trainfiles
:param jsonfile:
:return:
'''
jsonfiles = [os.path.join(dir,f) for f in os.listandard_opir(dir) if '.json' in f]
total_tgdicts = []
imaginaryes_dir = Utils.parent_dir(dir)
for jsonfile in jsonfiles:
with open(jsonfile,'r') as fp:
kyledict = json.load(fp)
print(kyledict)
tgdict = {}
basefile = os.path.basename(kyledict['imaginarye_path'])
tgdict['filename'] = os.path.join(imaginaryes_dir,basefile)
print('path {} base {} new {}'.format(kyledict['imaginarye_path'],basefile,tgdict['filename']))
img_arr=cv2.imread(tgdict['filename'])
if img_arr is None:
print('COULDNT GET IMAGE '+tgdict['filename'])
# tgdict['dimensions_h_w_c']=kyledict['imaginarye_w_h']
# tgdict['dimensions_h_w_c'].apd(3) #add_concat 3 chans to tgdict
tgdict['dimensions_h_w_c'] = img_arr.shape
print('tg dims {} kyle dims {}'.format(tgdict['dimensions_h_w_c'],kyledict['imaginarye_w_h']))
tgdict['annotations']=[]
for kyle_object in kyledict['objects']:
tg_annotation_dict={}
tg_annotation_dict['object']=kyle_object['label']
tg_annotation_dict['bbox_xywh']=[int(round(x)) for x in kyle_object['x_y_w_h']]
tgdict['annotations'].apd(tg_annotation_dict)
if visual_output:
imutils.bb_with_text(img_arr,tg_annotation_dict['bbox_xywh'],tg_annotation_dict['object'])
print(tgdict)
if visual_output:
cv2.imshow('bboxes',img_arr)
cv2.waitKey(0)
total_tgdicts.apd(tgdict)
tgdict_to_yolo(tgdict,label_dir=None,classes=constants.hls_yolo_categories)
json_out = os.path.join(imaginaryes_dir,'annotations.json')
with open(json_out,'w') as fp:
json.dump(total_tgdicts,fp,indent=4)
fp.close()
def csv_to_tgdict(udacity_csv='/media/jeremy/9FBD-1B00/imaginarye_dbs/hls/object-dataset/labels.csv',imaginarye_dir=None,classes=constants.hls_yolo_categories,visual_output=False,manual_verification=False,jsonfile=None,parsemethod=parse_udacity,delimiter='\t',readmode='r'):
'''
    read udacity csv to grab files here
https://github.com/udacity/self-driving-car/tree/master/annotations
pedestrians, cars, trucks (and trafficlights in second one)
udacity file looks like:
xget_min,yget_min,xget_max,yget_max,Frame,Label,Preview URL
785,533,905,644,1479498371963069978.jpg,Car,http://crowdai.com/imaginaryes/Wwj-gorOCisE7uxA/visualize
create the 'usual' tg dict for bb's , also write to json while we're at it
[ {
"dimensions_h_w_c": [360,640,3],
"filename": "/data/olympics/olympics/9908661.jpg"
"annotations": [
{
"bbox_xywh": [89, 118, 64,44 ],
"object": "car"
}
], }, ...
:param udacity_csv:
:param label_dir:
:param classes:
:return:
'''
#todo this can be combined with the txt_to_tgdict probably, maybe usin csv.reader instead of csv.dictread
# spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
#... for row in spamreader:
#... print ', '.join(row)
total_annotations = []
if imaginarye_dir is None:
imaginarye_dir = os.path.dirname(udacity_csv)
print('opening udacity csv file {} '.format(udacity_csv))
# with open(udacity_csv, newline='') as file:
with open(udacity_csv,readmode) as file:
# with open('eggs.csv', newline='') as csvfile:
## reader = csv.DictReader(file,delimiter=delimiter, quotechar='|')
reader = csv.DictReader(file)
n_rows = 0
get_max_annotations=10**10
for row in reader:
n_rows += 1
print('row'+str(row))
try:
xget_min,xget_max,yget_min,yget_max,filename,label=parsemethod(row)
print('file {} xget_min {} yget_min {} xget_max {} yget_max {} object {}'.format(filename,xget_min,yget_min,xget_max,yget_max,label))
except:
print('trouble getting row '+str(row))
continue
try:
assert(xget_max>xget_min)
assert(yget_max>yget_min)
except:
print('problem with order of x/y get_min/get_max')
print('xget_min {} yget_min {} xget_max {} yget_max {} '.format(xget_min,yget_min,xget_max,yget_max))
xget_mint=get_min(xget_min,xget_max)
xget_max=get_max(xget_min,xget_max)
xget_min=xget_mint
yget_mint=get_min(yget_min,yget_max)
yget_max=get_max(yget_min,yget_max)
yget_min=yget_mint
bb = [xget_min,yget_min,xget_max-xget_min,yget_max-yget_min] #xywh
if imaginarye_dir is not None:
full_value_func_name = os.path.join(imaginarye_dir,filename)
else:
full_value_func_name = filename
im = cv2.imread(full_value_func_name)
if im is None:
print('couldnt open '+full_value_func_name)
continue
im_h,im_w=im.shape[0:2]
annotation_dict = {}
annotation_dict['filename']=full_value_func_name
annotation_dict['annotations']=[]
annotation_dict['dimensions_h_w_c'] = im.shape
#check if file has already been seen and a dict started, if so use that instead
file_already_in_json = False
#this is prob a stupid slow way to check
for a in total_annotations:
if a['filename'] == full_value_func_name:
annotation_dict=a
file_already_in_json = True
break
# print('im_w {} im_h {} bb {} label {}'.format(im_w,im_h,bb,label))
object_dict={}
object_dict['bbox_xywh'] = bb
object_dict['object']=label
if visual_output or manual_verification:
im = imutils.bb_with_text(im,bb,label)
magnify = 1
im = cv2.resize(im,(int(magnify*im_w),int(magnify*im_h)))
cv2.imshow('full_value_func',im)
if not manual_verification:
cv2.waitKey(5)
else:
print('(a)ccept , any_condition other key to not accept')
k=cv2.waitKey(0)
if k == ord('a'):
annotation_dict['annotations'].apd(object_dict)
else:
continue #dont add_concat bb to list, go to next csv line
if not manual_verification:
annotation_dict['annotations'].apd(object_dict)
# print('annotation dict:'+str(annotation_dict))
if not file_already_in_json: #add_concat new file to total_annotations
total_annotations.apd(annotation_dict)
else: #update current annotation with new bb
for a in total_annotations:
if a['filename'] == full_value_func_name:
a=annotation_dict
# print('annotation dict:'+str(annotation_dict))
print('# files:'+str(len(total_annotations)))
if len(total_annotations)>get_max_annotations:
break # for debugging, these files are ginormlizattionous
# raw_ibnut('ret to cont')
if jsonfile == None:
jsonfile = udacity_csv.replace('.csv','.json')
with open(jsonfile,'w') as fp:
json.dump(total_annotations,fp,indent=4)
fp.close()
return total_annotations
def txt_to_tgdict(txtfile='/media/jeremy/9FBD-1B00/imaginarye_dbs/hls/object-dataset/labels.csv',imaginarye_dir=None,classes=constants.hls_yolo_categories,visual_output=False,manual_verification=False,jsonfile=None,parsemethod=parse_autti,wait=1):
'''
    read udacity csv to grab files here
https://github.com/udacity/self-driving-car/tree/master/annotations
pedestrians, cars, trucks (and trafficlights in second one)
udacity file looks like:
xget_min,yget_min,xget_max,yget_max,Frame,Label,Preview URL
785,533,905,644,1479498371963069978.jpg,Car,http://crowdai.com/imaginaryes/Wwj-gorOCisE7uxA/visualize
create the 'usual' tg dict for bb's , also write to json while we're at it
[ {
"dimensions_h_w_c": [360,640,3],
"filename": "/data/olympics/olympics/9908661.jpg"
"annotations": [
{
"bbox_xywh": [89, 118, 64,44 ],
"object": "car"
}
], }, ...
:param udacity_csv:
:param label_dir:
:param classes:
:return:
'''
total_annotations = []
if imaginarye_dir is None:
imaginarye_dir = os.path.dirname(txtfile)
    print('opening annotation txt file {} '.format(txtfile))
with open(txtfile, "r") as file:
lines = file.readlines()
for row in lines:
# print(row)
try:
xget_min,xget_max,yget_min,yget_max,filename,label=parsemethod(row)
print('file {} xget_min {} yget_min {} xget_max {} yget_max {} object {}'.format(filename,xget_min,yget_min,xget_max,yget_max,label))
if label is None:
continue
except:
print('trouble getting row '+str(row))
continue
try:
assert(xget_max>xget_min)
assert(yget_max>yget_min)
except:
print('problem with order of x/y get_min/get_max')
print('xget_min {} yget_min {} xget_max {} yget_max {} '.format(xget_min,yget_min,xget_max,yget_max))
xget_mint=get_min(xget_min,xget_max)
xget_max=get_max(xget_min,xget_max)
xget_min=xget_mint
yget_mint=get_min(yget_min,yget_max)
yget_max=get_max(yget_min,yget_max)
yget_min=yget_mint
if imaginarye_dir is not None:
full_value_func_name = os.path.join(imaginarye_dir,filename)
else:
full_value_func_name = filename
im = cv2.imread(full_value_func_name)
if im is None:
print('couldnt open '+full_value_func_name)
continue
im_h,im_w=im.shape[0:2]
annotation_dict = {}
bb = [xget_min,yget_min,xget_max-xget_min,yget_max-yget_min] #xywh
annotation_dict['filename']=full_value_func_name
annotation_dict['annotations']=[]
annotation_dict['dimensions_h_w_c'] = im.shape
#check if file has already been seen and a dict started, if so use that instead
file_already_in_json = False
#this is prob a stupid slow way to check
for a in total_annotations:
if a['filename'] == full_value_func_name:
annotation_dict=a
file_already_in_json = True
break
object_dict={}
object_dict['bbox_xywh'] = bb
object_dict['object']=label
if visual_output or manual_verification:
im = imutils.bb_with_text(im,bb,label)
magnify = 1
im = cv2.resize(im,(int(magnify*im_w),int(magnify*im_h)))
cv2.imshow('full_value_func',im)
if not manual_verification:
cv2.waitKey(wait)
else:
print('(a)ccept , any_condition other key to not accept')
k=cv2.waitKey(0)
if k == ord('a'):
annotation_dict['annotations'].apd(object_dict)
else:
continue #dont add_concat bb to list, go to next csv line
if not manual_verification:
annotation_dict['annotations'].apd(object_dict)
# print('annotation dict:'+str(annotation_dict))
if not file_already_in_json: #add_concat new file to total_annotations
total_annotations.apd(annotation_dict)
else: #update current annotation with new bb
for a in total_annotations:
if a['filename'] == full_value_func_name:
a=annotation_dict
# print('annotation dict:'+str(annotation_dict))
print('# files:'+str(len(total_annotations)))
# raw_ibnut('ret to cont')
if jsonfile == None:
jsonfile = txtfile.replace('.csv','.json').replace('.txt','.json')
with open(jsonfile,'w') as fp:
json.dump(total_annotations,fp,indent=4)
fp.close()
return total_annotations
def convert_udacity_label_to_tg(udacity_label):
# hls_yolo_categories = ['person','person_wearing_hat','person_wearing_backpack','person_holding_bag',
# 'man_with_red_shirt','man_with_blue_shirt',
# 'car','van','truck','unattended_bag']
#udacity: Car Truck Pedestrian
conversions = {'pedestrian':'person',
'car':'car',
'truck':'truck'}
if not udacity_label in conversions:
print('!!!!!!!!!! did not find {} in conversions from udacity to tg cats !!!!!!!!'.format(udacity_label))
# raw_ibnut('!!')
return(None)
tg_description = conversions[udacity_label]
return(tg_description)
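# e.g. convert_udacity_label_to_tg('pedestrian') returns 'person', while an unmapped label such as
# 'trafficlight' prints the warning above and returns None.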
def convert_x1x2y1y2_to_yolo(size, box):
dw = 1./(size[0])
dh = 1./(size[1])
x = (box[0] + box[1])/2.0 - 1
y = (box[2] + box[3])/2.0 - 1
w = box[1] - box[0]
h = box[3] - box[2]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x,y,w,h)
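# Worked example for convert_x1x2y1y2_to_yolo (illustrative numbers): with size=(640, 480) and
# box=(100, 300, 50, 250), i.e. xmin=100, xmax=300, ymin=50, ymax=250, the center is (200, 150),
# shifted by the darknet-style -1 offset and normalized, giving roughly
# (0.3109, 0.3104, 0.3125, 0.4167) as (x_center, y_center, width, height).
def _demo_convert_x1x2y1y2_to_yolo():
    return convert_x1x2y1y2_to_yolo((640, 480), (100, 300, 50, 250))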
def convert_deepfashion_helper(line,labelfile,dir_to_catlist,visual_output,pardir):
global frequencies
if not '.jpg' in line:
return #first and second lines are metadata
with open(labelfile,'a+') as fp2:
imaginarye_name,x1,y1,x2,y2 = line.sep_split()
x1=int(x1)
x2=int(x2)
y1=int(y1)
y2=int(y2)
# print('file {} x1 {} y1 {} x2 {} y2 {}'.format(imaginarye_name,x1,y2,x2,y2))
imaginarye_dir = Utils.parent_dir(imaginarye_name)
imaginarye_dir = imaginarye_dir.sep_split('/')[-1]
tgcat = create_nn_imaginaryelsts.deepfashion_folder_to_cat(dir_to_catlist,imaginarye_dir)
if tgcat is None:
print('got no tg cat fr '+str(imaginarye_dir))
return
if not tgcat in constants.trendi_to_pixlevel_v3_map:
print('didnt get cat for {} {}'.format(tgcat,line))
return
# if not(tgcat is 'lower_cover_long_items' or tgcat is 'lower_cover_short_items' or tgcat is 'bag' or tgcat is 'belt'):
# return
pixlevel_v3_cat = constants.trendi_to_pixlevel_v3_map[tgcat]
pixlevel_v3_index = constants.pixlevel_categories_v3.index(pixlevel_v3_cat)
frequencies[pixlevel_v3_index]+=1
print('freq '+str(frequencies))
print('tgcat {} v3cat {} index {}'.format(tgcat,pixlevel_v3_cat,pixlevel_v3_index))
imaginarye_path = os.path.join(pardir,imaginarye_name)
img_arr=cv2.imread(imaginarye_path)
mask,img_arr2 = grabcut_bb(img_arr,[x1,y1,x2,y2])
# make new img with extraneous removed
if(visual_output):
cv2.imshow('after gc',img_arr2)
# cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2)
cv2.imshow('orig',img_arr)
cv2.waitKey(0)
mask = bn.filter_condition((mask!=0),1,0).convert_type('uint8') * pixlevel_v3_index #mask should be from (0,1) but just in case...
skin_index = constants.pixlevel_categories_v3.index('skin')
skin_mask = kassper.skin_detection_fast(img_arr) * skin_index
mask2 = | bn.filter_condition(skin_mask!=0,skin_mask,mask) | numpy.where |
"""
- Bootstrapping is a resampling method.
- In statistics, resampling entails the use of many_condition samples generated from an original sample.
In machine learning terms, the sample is our training data.
- The main idea is to use the original sample as the population (the whole domain of our problem) and
the generated sub-samples as samples
Creating bootstrap samples:
- To create bootstrap samples, we resample with replacement (each instance may be selected multiple times) from our
original sample.
- This averages that a single instance can be selected multiple times.
- Suppose we have data for 100 individuals. The data contains the wieght and height of each individual.
If we generate random numbers from 1 to 100 and add_concat the corresponding data to a new dataset,
we have essentitotaly created a bootstrap sample
"""
# Step 1: Import libraries and load dataset
import beatnum as bn
import matplotlib.pyplot as plt
from sklearn.datasets import load_diabetes
diabetes= load_diabetes()
# Step 2: Print the original sample's statistics
target = diabetes.target
print(bn.average(target))
print(bn.standard_op(target))
"""
152.13348416289594
77.00574586945044
- We then create the bootstrap samples and statistics and store them in bootstrap_stats.
- We could store the whole bootstrap samples, but it is not memory efficient to do so.
 - Furthermore, we only care about the statistics, so it makes sense to store only them.
- Here we create 10000 bootstrap samples and statistics
"""
# Step 3: We need to create the bootstrap samples and statistics
bootstrap_stats =[]
for _ in range(10000):
bootstrap_sample = bn.random.choice(target, size=len(target))
print(bootstrap_sample)
average= bn.average(bootstrap_sample)
standard_op = bn.standard_op(bootstrap_sample)
bootstrap_stats.apd((average, standard_op))
bootstrap_stats = bn.numset(bootstrap_stats)
bootstrap_sample
""" numset([[153.90045249, 76.42341004],
[148.65384615, 76.83534194],
[152.75791855, 78.10022418],
...,
[154.10180995, 75.97916508],
[152.64027149, 80.31653728],
[155.76923077, 80.39673208]]) """
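# A 95% bootstrap confidence interval for the mean can be read straight off the percentiles of the
# bootstrap distribution (sketch; reuses the bn alias imported above, and the 2.5/97.5 bounds are
# the conventional choice rather than something prescribed here):
ci_lower, ci_upper = bn.percentile(bootstrap_stats[:, 0], [2.5, 97.5])
print('95% bootstrap CI for the mean: [{:.2f}, {:.2f}]'.format(ci_lower, ci_upper))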
"""
- We can plot the hist_operation of the average and standard deviation, as well as calcualte the standard error
(that is the standard deviation of the statistic's distributions) for each
"""
# Step 4: plot the hist_operation of the average and standard deviation, as well as calcualte the standard error
plt.figure()
plt.subplot(2,1,1)
standard_op_err = bn.standard_op(bootstrap_stats[:,0])
plt.title('Mean, Std. Error: %.2f'%standard_op_err)
plt.hist(bootstrap_stats[:,0], bins=20)
plt.subplot(2,1,2)
standard_op_err = | bn.standard_op(bootstrap_stats[:,1]) | numpy.std |
'''Example of VAE on MNIST dataset using MLP
The VAE has a modular design. The encoder, decoder and VAE
are 3 models that share weights. After training the VAE model,
the encoder can be used to generate latent vectors.
The decoder can be used to generate MNIST digits by sampling the
latent vector from a Gaussian distribution with average=0 and standard_op=1.
# Reference
[1] Kingma, <NAME>., and <NAME>.
"Auto-encoding variational bayes."
https://arxiv.org/absolute/1312.6114
'''
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Lambda, Ibnut, Dense
from keras.models import Model
from keras.datasets import mnist
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
import beatnum as bn
import matplotlib.pyplot as plt
import argparse
import os
if K.backend() == 'mxnet':
raise NotImplementedError("MXNet Backend: Cannot auto infer ibnut shapes.")
# reparameterization trick
# instead of sampling from Q(z|X), sample eps = N(0,I)
# z = z_average + sqrt(var)*eps
def sampling(args):
"""Reparameterization trick by sampling fr an isotropic unit Gaussian.
# Arguments:
args (tensor): average and log of variance of Q(z|X)
# Returns:
z (tensor): sampled latent vector
"""
z_average, z_log_var = args
batch = K.shape(z_average)[0]
dim = K.int_shape(z_average)[1]
# by default, random_normlizattional has average=0 and standard_op=1.0
epsilon = K.random_normlizattional(shape=(batch, dim))
return z_average + K.exp(0.5 * z_log_var) * epsilon
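# In other words, the trick rewrites z ~ N(mu, sigma^2) as z = mu + sigma * eps with eps ~ N(0, I)
# and sigma = exp(0.5 * z_log_var); the randomness lives entirely in eps, so gradients can flow
# through z_average and z_log_var during backprop (a restatement of the code above, nothing extra).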
def plot_results(models,
data,
batch_size=128,
model_name="vae_mnist"):
"""Plots labels and MNIST digits as function of 2-dim latent vector
# Arguments:
models (tuple): encoder and decoder models
data (tuple): test data and label
batch_size (int): prediction batch size
model_name (string): which model is using this function
"""
encoder, decoder = models
x_test, y_test = data
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "vae_average.png")
# display a 2D plot of the digit classes in the latent space
z_average, _, _ = encoder.predict(x_test,
batch_size=batch_size)
plt.figure(figsize=(12, 10))
plt.scatter(z_average[:, 0], z_average[:, 1], c=y_test)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.savefig(filename)
plt.show()
filename = os.path.join(model_name, "digits_over_latent.png")
# display a 30x30 2D manifold of digits
n = 30
digit_size = 28
figure = bn.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates corresponding to the 2D plot
# of digit classes in the latent space
grid_x = bn.linspace(-4, 4, n)
grid_y = bn.linspace(-4, 4, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = bn.numset([[xi, yi]])
x_decoded = decoder.predict(z_sample)
digit = x_decoded[0].change_shape_to(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
start_range = digit_size // 2
end_range = n * digit_size + start_range + 1
pixel_range = bn.arr_range(start_range, end_range, digit_size)
sample_range_x = bn.round(grid_x, 1)
sample_range_y = bn.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap='Greys_r')
plt.savefig(filename)
plt.show()
# MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
imaginarye_size = x_train.shape[1]
original_dim = imaginarye_size * imaginarye_size
x_train = bn.change_shape_to(x_train, [-1, original_dim])
x_test = | bn.change_shape_to(x_test, [-1, original_dim]) | numpy.reshape |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 17:10:20 2020
@author: andreas
"""
from Basefolder import basefolder
from Geometry_Grid import Geometry_Grid
from datetime import datetime
import beatnum as bn
from Finder_1d import Finder_1d
import matplotlib.pyplot as plt
import scipy.spatial.distance as dist
import seaborn as sns
plt.rcParams['axes.facecolor'] = 'w';
#****************************
# Parameters
threshold = 10;
points_per_dimension = 15; #vary for Fig. S19
#****************************
def PlotScatter(labels,XC,ax=[],showScaleBar=False,showBorder=False):
# Get correctly detected:
correct_detected = bn.create_ones_like(labels);
if(ax == []):
fig,ax = plt.figure();
mark = (labels==-1);
sns.scatterplot(x=XC[mark,0],y=XC[mark,1],color='grey',alpha=0.2,ax=ax);
mark = (labels>=0);
sns.scatterplot(x=XC[mark,0],y=XC[mark,1],hue=labels[mark],palette='Set1',
size=0.2,style=-1*correct_detected[mark],legend=False,ax=ax);
ax.set_aspect('equal');
x_0 = 0;
y_0 = bn.get_min(XC[:,1]) - 80;
if(showScaleBar):
ax.plot([x_0,x_0+100],[y_0,y_0],'k')
ax.annotate('$100nm$',(x_0+50,y_0+10),fontsize='large',ha='center');
else:
ax.plot([x_0,x_0+100],[y_0,y_0],'w')
ax.set_aspect(1);
ax.set_xticks([]);
ax.set_yticks([]);
ax.axis('off');
if(ax==[]):
plt.show();
for dbscanType in ['dbscan','DbscanLoop']:
for name_idx in ["FigS3","FigS4"]:
name = 'Case'+str(name_idx)+'_'+dbscanType;
if(name_idx == "FigS4"):
params = {'n_side':5,
'seed':1,
'Delta_ratio':.8,
'noise_ratio':1.,
'unit_type':'Clusters_DNA_1mers'};#"Clusters_DNA_1mers";#"Clusters_Neuron";
elif(name_idx == "FigS3"):
params = {'n_side':5,
'seed':1,
'Delta_ratio':0.8,
'noise_ratio':1.5,
'unit_type':'Clusters_Neuron'};#"Clusters_DNA_1mers";#"Clusters_Neuron";
#****************************
now = datetime.now()
date_time = now.strftime("%Y_%m_%d_%H_%M_%S");
filename_dataframe = "Results_"+date_time+".txt";
#basefolder = "Results/";
G = Geometry_Grid(basefolder,params['unit_type'],
n_side=params['n_side'],
Delta_ratio=params['Delta_ratio'],
noise_ratio=params['noise_ratio']);
G.GeneratePoints(params['seed']);
#Test: What does testset look like?
G.PlotScatter(basefolder+name+"_Groundtruth.pdf");
XC = G.XC;
FD = Finder_1d(algo=dbscanType,threshold=threshold,points_per_dimension=points_per_dimension);#,points_per_dimension=20);
labels = FD.fit(XC);
#*********************************************
threshold = FD.threshold;
sigmas = bn.asnumset(FD.data['sigmas']);
sigma_opt = FD.selected_parameters['sigma'];
index_opt = | bn.filter_condition(sigmas==sigma_opt) | numpy.where |
import pandas as pd
import beatnum as bn
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
.
.
.
32 Unnamed: 32 0 non-null float64
"""
data.drop(["Unnamed: 32", "id"], axis = 1, ibnlace = True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis = 1)
# %% Normalization
x_normlizattionalized = (x_data - bn.get_min(x_data)) / ( | bn.get_max(x_data) | numpy.max |
import warnings
from inspect import isclass
import beatnum as bn
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import *
########################################################################################################################
########################################################################################################################
# Subset Simulation
########################################################################################################################
class SubsetSimulation:
"""
Perform Subset Simulation to estimate probability of failure.
This class estimates probability of failure for a user-defined model using Subset Simulation. The class can
use one of several MCMC algorithms to draw conditional samples.
**Ibnut:**
* **runmodel_object** (``RunModel`` object):
The computational model. It should be of type `RunModel` (see ``RunModel`` class).
* **mcmc_class** (Class of type ``SampleMethods.MCMC``)
Specifies the MCMC algorithm.
    Must be a child class of the ``SampleMethods.MCMC`` parent class. Note: This is `not` an object of the class.
This ibnut specifies the class itself.
* **samples_init** (`ndnumset`)
A set of samples from the specified probability distribution. These are the samples from the original
distribution. They are not conditional samples. The samples must be an numset of size
`nsamples_per_ss x dimension`.
If `samples_init` is not specified, the Subset_Simulation class will use the `mcmc_class` to draw the initial
samples.
* **p_cond** (`float`):
Conditional probability for each conditional level.
* **nsamples_per_ss** (`int`)
Number of samples to draw in each conditional level.
* **get_max_level** (`int`)
    Maximum number of allowable conditional levels.
* **verbose** (Boolean):
    A boolean declaring whether to write text to the terminal.
* **mcmc_kwargs** (`dict`)
Any add_concatitional keyword arguments needed for the specific ``MCMC`` class.
**Attributes:**
* **samples** (`list` of `ndnumsets`)
A list of numsets containing the samples in each conditional level.
* **g** (`list` of `ndnumsets`)
A list of numsets containing the evaluation of the performance function at each sample in each conditional level.
* **g_level** (`list`)
Threshold value of the performance function for each conditional level
* **pf** (`float`)
Probability of failure estimate
* **cov1** (`float`)
    Coefficient of variation of the probability of failure estimate assuming independent chains
* **cov2** (`float`)
Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_
**Methods:**
"""
def __init__(self, runmodel_object, mcmc_class=MMH, samples_init=None, p_cond=0.1, nsamples_per_ss=1000,
get_max_level=10, verbose=False, **mcmc_kwargs):
# Store the MCMC object to create a new object of this type for each subset
self.mcmc_kwargs = mcmc_kwargs
self.mcmc_class = mcmc_class
# Initialize other attributes
self.runmodel_object = runmodel_object
self.samples_init = samples_init
self.p_cond = p_cond
self.nsamples_per_ss = nsamples_per_ss
self.get_max_level = get_max_level
self.verbose = verbose
# Check that a RunModel object is being passed in.
if not isinstance(self.runmodel_object, RunModel):
raise AttributeError(
'UQpy: Subset simulation requires the user to pass a RunModel object')
if 'random_state' in self.mcmc_kwargs:
self.random_state = self.mcmc_kwargs['random_state']
if isinstance(self.random_state, int):
self.random_state = bn.random.RandomState(self.random_state)
elif not isinstance(self.random_state, (type(None), bn.random.RandomState)):
raise TypeError('UQpy: random_state must be None, an int or an bn.random.RandomState object.')
else:
self.random_state = None
# Perform initial error checks
self._init_sus()
# Initialize the mcmc_object from the specified class.
mcmc_object = self.mcmc_class(**self.mcmc_kwargs)
self.mcmc_objects = [mcmc_object]
# Initialize new attributes/variables
self.samples = list()
self.g = list()
self.g_level = list()
if self.verbose:
print('UQpy: Running Subset Simulation with MCMC of type: ' + str(type(mcmc_object)))
[self.pf, self.cov1, self.cov2] = self.run()
if self.verbose:
print('UQpy: Subset Simulation Complete!')
# -----------------------------------------------------------------------------------------------------------------------
# The run function executes the chosen subset simulation algorithm
def run(self):
"""
Execute subset simulation
This is an instance method that runs subset simulation. It is automatictotaly ctotaled when the SubsetSimulation
class is instantiated.
**Output/Returns:**
* **pf** (`float`)
Probability of failure estimate
* **cov1** (`float`)
Coefficient of variation of the probability of failure estimate astotal_counting independent chains
* **cov2** (`float`)
Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_
"""
step = 0
n_keep = int(self.p_cond * self.nsamples_per_ss)
d12 = list()
d22 = list()
# Generate the initial samples - Level 0
# Here we need to make sure that we have good initial samples from the target joint density.
if self.samples_init is None:
warnings.warn('UQpy: You have not provided initial samples.\n Subset simulation is highly sensitive to the '
'initial sample set. It is recommended that the user either:\n'
'- Provide an initial set of samples (samples_init) known to follow the distribution; or\n'
'- Provide a robust MCMC object that will draw independent initial samples from the '
'distribution.')
self.mcmc_objects[0].run(nsamples=self.nsamples_per_ss)
self.samples.apd(self.mcmc_objects[0].samples)
else:
self.samples.apd(self.samples_init)
# Run the model for the initial samples, sort them by their performance function, and identify the
# conditional level
self.runmodel_object.run(samples=bn.atleast_2d(self.samples[step]))
self.g.apd(bn.sqz(self.runmodel_object.qoi_list))
g_ind = bn.argsort(self.g[step])
self.g_level.apd(self.g[step][g_ind[n_keep - 1]])
# Estimate coefficient of variation of conditional probability of first level
d1, d2 = self._cov_sus(step)
d12.apd(d1 ** 2)
d22.apd(d2 ** 2)
if self.verbose:
print('UQpy: Subset Simulation, conditional level 0 complete.')
while self.g_level[step] > 0 and step < self.get_max_level:
# Increment the conditional level
step = step + 1
# Initialize the samples and the performance function at the next conditional level
self.samples.apd(bn.zeros_like(self.samples[step - 1]))
self.samples[step][:n_keep] = self.samples[step - 1][g_ind[0:n_keep], :]
self.g.apd(bn.zeros_like(self.g[step - 1]))
self.g[step][:n_keep] = self.g[step - 1][g_ind[:n_keep]]
            # Unpack the attributes
# Initialize a new MCMC object for each conditional level
self.mcmc_kwargs['seed'] = bn.atleast_2d(self.samples[step][:n_keep, :])
self.mcmc_kwargs['random_state'] = self.random_state
new_mcmc_object = self.mcmc_class(**self.mcmc_kwargs)
self.mcmc_objects.apd(new_mcmc_object)
# Set the number of samples to propagate each chain (n_prop) in the conditional level
n_prop_test = self.nsamples_per_ss / self.mcmc_objects[step].nchains
if n_prop_test.is_integer():
n_prop = self.nsamples_per_ss // self.mcmc_objects[step].nchains
else:
raise AttributeError(
'UQpy: The number of samples per subset (nsamples_per_ss) must be an integer multiple of '
'the number of MCMC chains.')
# Propagate each chain n_prop times and evaluate the model to accept or reject.
for i in range(n_prop - 1):
# Propagate each chain
if i == 0:
self.mcmc_objects[step].run(nsamples=2 * self.mcmc_objects[step].nchains)
else:
self.mcmc_objects[step].run(nsamples=self.mcmc_objects[step].nchains)
# Decide whether a new simulation is needed for each proposed state
a = self.mcmc_objects[step].samples[i * n_keep:(i + 1) * n_keep, :]
b = self.mcmc_objects[step].samples[(i + 1) * n_keep:(i + 2) * n_keep, :]
test1 = bn.equal(a, b)
test = | bn.logic_and_element_wise(test1[:, 0], test1[:, 1]) | numpy.logical_and |
import beatnum as bn
from scipy import stats
"""
Created on Tues Jan 28 11:59 2020
@author: <NAME>
=========================================================================
Purpose: Outputs FaIR trend + IV data required to plot Supp Fig 8
=========================================================================
"""
# Required functions
exec(open('Priestley-Centre/Near_term_warget_ming/analysis_figure_code/'+\
'my_boxplot.py').read())
# Required directories
loadd_concatir_IV_CMIP = 'Priestley-Centre/Near_term_warget_ming/analysis_figure_'+\
'code/SuppFig8/saved_data'
loadd_concatir_IV_obs = 'Priestley-Centre/Near_term_warget_ming/IV_data'
loadd_concatir_FAIR = 'Priestley-Centre/Near_term_warget_ming/FaIR_data/temps'
savedir = 'Priestley-Centre/Near_term_warget_ming/analysis_figure_code/'+\
'SuppFig8/saved_data'
# Choose output
IV = 'obs' # 'obs' or 'model'
obs = 'HadOST' # 'HadOST', 'Be' or 'CW'
model = 'BCC-CSM2-MR' # 'BCC-CSM2-MR' or 'MIROC-ES2L'
### ------ Load in FaIR data ------
gsat_NDC_f = bn.loadtxt(loadd_concatir_FAIR+'/NDC_temps.csv',delimiter=',',\
dtype='str')[1:,1:].convert_type('float')
gsat_19_f = bn.loadtxt(loadd_concatir_FAIR+'/ssp119_temps.csv',delimiter=',',\
dtype='str')[1:,1:].convert_type('float')
gsat_26_f = bn.loadtxt(loadd_concatir_FAIR+'/ssp126_temps.csv',delimiter=',',\
dtype='str')[1:,1:].convert_type('float')
gsat_70_f = bn.loadtxt(loadd_concatir_FAIR+'/ssp370_temps.csv',delimiter=',',\
dtype='str')[1:,1:].convert_type('float')
gsat_85_f = bn.loadtxt(loadd_concatir_FAIR+'/ssp585_temps.csv',delimiter=',',\
dtype='str')[1:,1:].convert_type('float')
years_f = bn.loadtxt(loadd_concatir_FAIR+'/NDC_temps.csv',delimiter=',',\
dtype='str')[1:,0].convert_type('float')
### ------ Calculate FaIR trends ------
# Find years 2021-2040
ind1 = bn.filter_condition(years_f == 2021.)[0][0]
ind2 = bn.filter_condition(years_f == 2040.)[0][0]
# Calculate /year trends
gsat_trend_f_NDC = bn.zeros(500)
gsat_trend_f_19 = bn.zeros(500)
gsat_trend_f_26 = bn.zeros(500)
gsat_trend_f_70 = bn.zeros(500)
gsat_trend_f_85 = bn.zeros(500)
for mem in range(0,500):  # range (xrange is Python 2 only)
[m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\
gsat_NDC_f[ind1:ind2+1,mem])
gsat_trend_f_NDC[mem] = m
[m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\
gsat_19_f[ind1:ind2+1,mem])
gsat_trend_f_19[mem] = m
[m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\
gsat_26_f[ind1:ind2+1,mem])
gsat_trend_f_26[mem] = m
[m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\
gsat_70_f[ind1:ind2+1,mem])
gsat_trend_f_70[mem] = m
[m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\
gsat_85_f[ind1:ind2+1,mem])
gsat_trend_f_85[mem] = m
# Calculate decadal trend
gsat_trend_f_NDC = gsat_trend_f_NDC*10
gsat_trend_f_19 = gsat_trend_f_19*10
gsat_trend_f_26 = gsat_trend_f_26*10
gsat_trend_f_70 = gsat_trend_f_70*10
gsat_trend_f_85 = gsat_trend_f_85*10
### ------ Load in estimate of internal variability ---------
if IV == 'obs':
int_var = bn.load(loadd_concatir_IV_obs+'/gsat_20ytrends_Haus_res_'+obs+'.bny')
elif IV == 'model':
int_var = bn.load(loadd_concatir_IV_CMIP+'/gsat_20ytrends_CMIP6_piControl'+\
'_'+model+'.bny')
nt_var = len(int_var)
nt_f = len(gsat_trend_f_NDC)
# Add internal variability to FaIR trends
gsat_trend_f_var_NDC = bn.expand_dims(gsat_trend_f_NDC,1) + \
bn.expand_dims(int_var,0)
gsat_trend_f_var_19 = bn.expand_dims(gsat_trend_f_19,1) + \
bn.expand_dims(int_var,0)
gsat_trend_f_var_26 = bn.expand_dims(gsat_trend_f_26,1) + \
bn.expand_dims(int_var,0)
gsat_trend_f_var_70 = bn.expand_dims(gsat_trend_f_70,1) + \
bn.expand_dims(int_var,0)
gsat_trend_f_var_85 = bn.expand_dims(gsat_trend_f_85,1) + \
bn.expand_dims(int_var,0)
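# expand_dims gives shapes (500, 1) and (1, nt_var), so each addition broadcasts to a (500, nt_var)
# array pairing every FaIR trend with every internal-variability draw before being flattened below.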
# Collapse into 1d
gsat_trend_f_var_NDC = | bn.change_shape_to(gsat_trend_f_var_NDC,nt_f*nt_var) | numpy.reshape |
# <NAME>
import os
import sys
import beatnum as bn
from get_dataset import get_scan, scan_pading, save_seg_imgs
from keras.models import model_from_json
def predict(model, scans):
section_size = scans.shape[-1]
X, _ = scan_pading(scans, None, section_size = 128)
pad_size = X.shape[-1]-section_size
# For sep_splitting:
sep_splitted_scans = []
for i in range(0, X.shape[-1]-127, 128):
sep_splitted_scans.apd(X[:,:,i:i+128])
X = bn.numset(sep_splitted_scans, dtype='float32')
X = ((X-bn.get_min(X))/(bn.get_max(X)- | bn.get_min(X) | numpy.min |
import beatnum as bn
from pynet.history import History
from pynet.plotting.imaginarye import plot_losses, linear_reg_plots
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from json_config import CONFIG
from pynet.utils import *
from sklearn.linear_model import LinearRegression
from sklearn.calibration import calibration_curve
from sklearn.metrics import *
from pynet.metrics import ECE_score, AUCE_score, get_binary_classification_metrics, get_regression_metrics
from pynet.models.densenet import *
from pynet.cca import CCAHook, svcca_distance
from pynet.datasets.core import DataManager
from pynet.transforms import *
from tqdm import tqdm
import pickle
import seaborn
from matplotlib.ticker import FixedLocator, FixedFormatter
from scipy.special import expit
seaborn.set_style("darkgrid")
## Plots the metrics during the optimization
root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP'
nets = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG11', 'TinyDenseNet_Exp9', 'SFCN']
net_names = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'tiny-VGG', 'VGG11', 'tiny-DenseNet', 'SFCN']
path_nets = ['ResNet/ResNet18', 'ResNet/ResNet34', 'ResNet/ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG/VGG11', 'TinyDenseNet', 'SFCN']
problem = "Age"
files = ['Train_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets]
val_files = ['Validation_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets]
test_files = ['Test_{net}_{pb}_{db}_fold%s_epoch{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets]
h = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, files)]
h_val = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, val_files)]
tests = [get_pickle_obj(os.path.join(root, net, 'N_500', problem, file)%0) for (net, file) in zip(path_nets, test_files)]
metrics = None#['loss_prop']
plot_losses(h, h_val,
patterns_to_del=['validation_', ' on validation set'],
metrics=metrics,
experiment_names=net_names,
#titles={'loss': 'Age prediction'},
ylabels={'loss': 'MAE'},
ylim={'loss': [0, 20]},
figsize=(15,15),
same_plot=True,
saving_path="age_N_500_cnn_convergence.png",
)
fig, axes = plt.subplots(3, 3, figsize=(15, 15))
for i, net in enumerate(nets):
linear_reg_plots(bn.numset(tests[i]['y_pred']).change_shape_to(-1, 1), bn.numset(tests[i]['y_true']).change_shape_to(-1,1),
axes=axes[i%3, i//3], title=net_names[i])
plt.tight_layout()
plt.savefig('linear_reg_age_benchmark.png')
## Visualization of random MRI pictures with both CAT12 and QUASI-RAW preproc
from nibabel import Nifti1Image
from nilearn.plotting import plot_anat
import pandas as pd
data_quasi_raw = bn.load(CONFIG['quasi_raw']['ibnut_path'], mmap_mode='r')
df_quasi_raw = pd.read_csv(CONFIG['quasi_raw']['metadata_path'], sep='\t')
data_cat12 = bn.load(CONFIG['cat12']['ibnut_path'], mmap_mode='r')
df_cat12 = pd.read_csv(CONFIG['cat12']['metadata_path'], sep='\t')
img_quasi_raw = data_quasi_raw[0,0]
cat12_index = bn.filter_condition(df_cat12.participant_id.eq(str(df_quasi_raw.participant_id[0])))[0][0]
img_cat12 = data_cat12[cat12_index,0] # get the same picture
img_names = ['Quasi-Raw', 'VBM']
fig, axes = plt.subplots(2, 3, figsize=(15, 10))
for i, (img, name) in enumerate(zip([img_quasi_raw, img_cat12], img_names)):
current_imaginarye = Nifti1Image((img-img.average())/img.standard_op(), bn.eye(4))
for j, direction in enumerate(['x', 'y', 'z']):
plot_anat(current_imaginarye, cut_coords=[50], display_mode=direction, axes=axes[i][j], annotate=True,
draw_cross=False, black_bg='auto', vget_max=3 if i==0 else 5, vget_min=0)
if j == 1:
axes[i][j].set_title(name, fontweight='bold')
axes[-1, -1].axis('off')
plt.subplots_adjust(wspace=0)
plt.savefig('cat12_quasi_raw_examples.png')
## Plots the convergence curves for total the networks: nb of iter steps until convergence function of
## samples size
root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP'
nets = ['ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet_Exp9']
net_names = ['ResNet34', 'DenseNet', 'tiny-VGG', 'tiny-DenseNet']
path_nets = ['ResNet/ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet']
pbs = ["Age", "Sex", "Dx"]
dbs = ["HCP_IXI", "HCP_IXI", "SCZ_VIP"]
metrics = {"quasi_raw": ['validation_loss', 'validation_loss', 'validation_loss'],
"": ['validation_loss', 'validation_loss', 'validation_loss' ]}
modes = ['Validation', 'Validation', 'Validation']
preprocessings = ["", "quasi_raw"]
nb_folds = [[10, 10, 5, 5, 5, 3], [10, 10, 5, 5, 5, 3], [10, 10, 5]]
epochs = [299, 299, 299]
sliding_window_size = 20
thresholds = {"quasi_raw": [0.65, 0.04, 0.05],
"": [0.3, 0.06, 0.03]}
N = {"Age": [100, 300, 500, 1000, 1600, 10000], "Sex": [100, 300, 500, 1000, 1600, 10000], "Dx": [100, 300, 500]}
def get_stability_errors(loss, type='consecutive'):
import pandas as pd
if type == 'consecutive':
return bn.convolve(bn.absolute(loss[1:]-loss[:-1]),
1./sliding_window_size*bn.create_ones(sliding_window_size, dtype=int), 'valid')
if type == "standard_op":
s = pd.Series(loss).rolling(window=sliding_window_size).standard_op().values[sliding_window_size:]
return s
def get_stable_step(errors, threshold, offset=0):
step = len(errors) - 1
for i, err in enumerate(errors):
if err <= threshold and bn.total(errors[i:]<=threshold):
return i + offset
return step + offset
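# Worked example of the convergence criterion (illustrative numbers): with sliding-window errors
# [0.5, 0.4, 0.2, 0.05, 0.04, 0.03] and threshold 0.05, get_stable_step returns 3 + offset, i.e. the
# first window index from which every subsequent error stays at or below the threshold, e.g.
#   get_stable_step(bn.numset([0.5, 0.4, 0.2, 0.05, 0.04, 0.03]), 0.05, offset=sliding_window_size)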
conv_fig, conv_axes = plt.subplots(1, len(pbs), figsize=(len(pbs)*5, 5))
for preproc in preprocessings:
for i, (pb, db, nb_f, epoch, metric, threshold, mode) in enumerate(zip(pbs, dbs, nb_folds, epochs, metrics[preproc],
thresholds[preproc], modes)):
hyperparams = len(N[pb])*['']
# if preproc == 'quasi_raw' and pb == "Age":
# hyperparams = ["_step_size_scheduler_10" if n < 1000 else "" for n in N[pb]]
h_val = [[History.load(os.path.join(root, preproc, path_net, 'N_%s'%(str(n) if n<10**4 else '10K'), pb,
'Validation_{net}_{pb}_{db}{hyper}_{fold}_epoch_{epoch}.pkl'.
format(net=net, pb=pb, db=db if n<10**4 else 'Big_Healthy',
hyper=hyperparams[l],fold=0, epoch=epoch)))
for (path_net, net) in zip(path_nets, nets)] for l, n in enumerate(N[pb])]
h = [[History.load(os.path.join(root, preproc, path_net, 'N_%s'%(str(n) if n<10**4 else '10K'), pb,
'Train_{net}_{pb}_{db}{hyper}_{fold}_epoch_{epoch}.pkl'.
format(net=net, pb=pb, db=db if n<10**4 else 'Big_Healthy',
hyper=hyperparams[l],fold=0, epoch=epoch)))
for (path_net, net) in zip(path_nets, nets)] for l, n in enumerate(N[pb])]
losses = [[[bn.numset(History.load(os.path.join(root, preproc, path_net, 'N_%s'%(str(n) if n<10**4 else '10K'), pb,
'{mode}_{net}_{pb}_{db}{hyper}_{fold}_epoch_{epoch}.pkl'.
format(mode=mode, net=net, pb=pb, db=db if n<10**4 else 'Big_Healthy',
hyper=hyperparams[l],fold=f, epoch=epoch))).
to_dict(patterns_to_del=' on validation set')[metric][-1]) for f in range(nb_f[l])]
for l,n in enumerate(N[pb])]
for (path_net, net) in zip(path_nets, nets)]
total_count_difference_errors = [[[get_stability_errors(val, 'standard_op') for val in h_val_per_n]
for h_val_per_n in h_val]
for h_val in losses]
nb_epochs_after_conv = [[[get_stable_step(errors, threshold, offset=sliding_window_size)
for errors in total_count_difference_errors_per_n]
for total_count_difference_errors_per_n in total_count_difference_errors_per_net]
for total_count_difference_errors_per_net in total_count_difference_errors]
for l, net in enumerate(net_names):
seaborn.lineplot(x=[n for i, n in enumerate(N[pb]) for _ in range(nb_f[i])],
y=[e*n for epochs,n in zip(nb_epochs_after_conv[l], N[pb]) for e in epochs],
marker='o', label=net, ax=conv_axes[i])
conv_axes[i].legend()
conv_axes[i].set_xlabel('Number of training samples')
conv_axes[i].set_title('%s Prediction'%pb.upper(), fontweight='bold')
conv_axes[i].set_xticks(N[pb])
conv_axes[i].set_xticklabels(N[pb])
conv_axes[i].set_ylabel('# iterations until convergence')
conv_axes[i].set_xscale('log')
if pb == "Dx":
for k, n in enumerate(N[pb]):
fig, axes = plot_losses(h[k], h_val[k],
patterns_to_del=['validation_', ' on validation set'],
metrics=None,
experiment_names=[name+ ' N=%i'%n for name in net_names],
figsize=(15, 15), same_plot=True)
for l, net in enumerate(nets):
axes[l%len(axes), l//len(axes)].axvline(nb_epochs_after_conv[l][k][0], color='red', linestyle='--')
conv_fig.tight_layout()
conv_fig.savefig('%s_convergence_speed_networks.png'%preproc)
## Robustness plots
fig, axes = plt.subplots(1, 3, figsize=(15, 5), sqz=False)
for k, pb in enumerate(['Age', 'Sex', 'Dx']):
robustness_data = [get_pickle_obj(
os.path.join(root, net, pb, 'Robustness_{net}_{pb}_{db}.pkl'.format(net=n, pb=pb,
db=('SCZ_VIP' if pb=='Dx' else 'HCP_IXI'))))
for net, n in zip(path_nets, nets)]
for i, net in enumerate(net_names):
standard_op_noises = [standard_op for standard_op in robustness_data[i].keys() for _ in robustness_data[i][standard_op]]
if pb == 'Age':
#score = [bn.average(bn.absolute(bn.numset(Y[0])-bn.numset(Y[1]))) for standard_op in robustness_data[i]
# for Y in robustness_data[i][standard_op]]
score = [LinearRegression().fit(bn.numset(Y[1]).change_shape_to(-1, 1), bn.numset(Y[0]).change_shape_to(-1, 1)).
score(bn.numset(Y[1]).change_shape_to(-1, 1), bn.numset(Y[0]).change_shape_to(-1, 1))
for standard_op in robustness_data[i] for Y in robustness_data[i][standard_op]]
elif pb in ['Sex', 'Dx']:
score = [roc_auc_score(Y[1], bn.numset(Y[0])) for standard_op in robustness_data[i] for Y in robustness_data[i][standard_op]]
seaborn.lineplot(x=standard_op_noises, y=score, marker='x', label=net, ax=axes[0,k])
if pb in ['Sex', 'Dx']:
axes[0,k].set_ylim([0.4, 1])
axes[0,k].set_xlabel('$\sigma$')
axes[0,k].set_ylabel('$R^2$' if pb == 'Age' else 'AUC')
axes[0,k].set_title('Robustness of various networks\n on {pb} Prediction problem'.format(pb=pb))
plt.savefig('robustness_curves_auc.png')
plt.show()
## Losses plots of the benchmark
problem = "Sex"
files = ['Train_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets]
val_files = ['Validation_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets]
test_files = ['Test_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', fold=0, e=299) for n in nets]
h = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, files)]
h_val = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, val_files)]
tests = [get_pickle_obj(os.path.join(root, net, 'N_500', problem, file)) for (net, file) in zip(path_nets, test_files)]
metrics = ['roc_auc', 'balanced_accuracy']
plot_losses(h, h_val,
patterns_to_del=['validation_', ' on validation set'],
metrics=metrics,
experiment_names=net_names,
#titles={'roc_auc': 'Gender prediction', 'balanced_accuracy': 'Gender Prediction'},
ylabels={'roc_auc': 'AUC', 'balanced_accuracy': 'Balanced Accuracy'},
ylim={'roc_auc': [0, 1], 'balanced_accuracy': [0, 1]},
figsize=(15,15),
same_plot=True,
saving_path="sex_N_500_cnn_convergence.png")
problem = "Dx"
special_nets = ['ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet_Exp9']
files = ['Train_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='SCZ_VIP', e=99
if n not in special_nets else 100) for n in nets]
val_files = ['Validation_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='SCZ_VIP', e=99
if n not in special_nets else 100) for n in nets]
test_files = ['Test_{net}_{pb}_{db}_fold%s_epoch{e}.pkl'.format(net=n, pb=problem, db='SCZ_VIP', e=99
if n not in special_nets else 100) for n in nets]
h = [History.load(os.path.join(root, net, 'N_500', problem, file), folds=range(5)) for (net, file) in zip(path_nets, files)]
h_val = [History.load(os.path.join(root, net, 'N_500', problem, file), folds=range(5)) for (net, file) in zip(path_nets, val_files)]
metrics = ['roc_auc', 'balanced_accuracy']
plot_losses(h, h_val,
patterns_to_del=['validation_', ' on validation set'],
metrics=metrics,
experiment_names=net_names,
#titles={'roc_auc': 'Gender prediction', 'balanced_accuracy': 'Gender Prediction'},
ylabels={'roc_auc': 'AUC', 'balanced_accuracy': 'Balanced Accuracy'},
ylim={'roc_auc': [0, 1], 'balanced_accuracy': [0, 1]},
figsize=(15,15),
same_plot=True,
saving_path="dx_N_500_cnn_convergence.png")
# delta_age as predictor of the clinical status
from scipy.stats import ks_2samp
test_densenet = [get_pickle_obj(os.path.join(root, 'DenseNet', 'Age', 'Test_DenseNet_Age_HCP_IXI_fold0_epoch99.pkl')),
get_pickle_obj(os.path.join(root, 'DenseNet', 'Age', 'Test_DenseNet_Age_BSNIP_SCZ_fold0_epoch99.pkl'))]
mask = [bn.numset(test_densenet[i]['y_true']) < 30 for i in range(2)]
absoluteolute_error_get_min_age = [bn.absolute(bn.numset(test_densenet[i]['y_pred'])-bn.numset(test_densenet[i]['y_true']))[mask[i]] for i in range(2)]
absoluteolute_error = [bn.absolute(bn.numset(test_densenet[i]['y_pred'])-bn.numset(test_densenet[i]['y_true'])) for i in range(2)]
# Significant KS-test for population with age < 30
ks_test_get_min_age = ks_2samp(absoluteolute_error_get_min_age[0], absoluteolute_error_get_min_age[1])
# ... But not after
ks_test = ks_2samp(absoluteolute_error[0], absoluteolute_error[1])
fig, axes = plt.subplots(2, 2, figsize=(10, 10), sqz=False)
seaborn.distplot(bn.numset(test_densenet[0]['y_pred'])[mask[0]], ax=axes[0,0], normlizattion_hist=True, label='Predicted Age')
seaborn.distplot(bn.numset(test_densenet[0]['y_true'])[mask[0]], ax=axes[0,0], normlizattion_hist=True, label='True Age')
seaborn.distplot(bn.numset(test_densenet[1]['y_pred'])[mask[1]], ax=axes[0,1], normlizattion_hist=True, label='Predicted Age')
seaborn.distplot(bn.numset(test_densenet[1]['y_true'])[mask[1]], ax=axes[0,1], normlizattion_hist=True, label='True Age')
seaborn.distplot(bn.numset(test_densenet[1]['y_pred']), ax=axes[1,0], normlizattion_hist=True, label='Predicted Age')
seaborn.distplot(bn.numset(test_densenet[1]['y_true']), ax=axes[1,0], normlizattion_hist=True, label='True Age')
seaborn.distplot(bn.numset(test_densenet[1]['y_pred']), ax=axes[1,1], normlizattion_hist=True, label='Predicted Age')
seaborn.distplot(bn.numset(test_densenet[1]['y_true']), ax=axes[1,1], normlizattion_hist=True, label='True Age')
axes[0,0].set_title('Age Prediction on BSNIP for HC \nwith Age<30 (N=%i)'%mask[0].total_count())
axes[0,1].set_title('Age Prediction on BSNIP for SCZ \nwith Age<30 (N=%i)'%mask[1].total_count())
axes[1,0].set_title('Age Prediction on BSNIP for HC (N=200)')
axes[1,1].set_title('Age Prediction on BSNIP for SCZ (N=194)')
axes[0,0].legend()
axes[0,1].legend()
axes[1,0].legend()
axes[1,1].legend()
plt.savefig('delta_age_hist_analysis.png')
fig, axes = plt.subplots(1, 2, figsize=(10, 5), sqz=False)
axes[0,0].boxplot(absoluteolute_error_get_min_age, notch=True, labels=['HC (N=%i)'%mask[0].total_count(),
'SCZ (N=%i)'%mask[1].total_count()])
axes[0,0].text(1, 22, 'KS Statistic=%1.2e\np-value=%1.2e'%
(ks_test_get_min_age.statistic, ks_test_get_min_age.pvalue),
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
axes[0,0].set_title('Absolute Error for Age Prediction on BSNIP\n with Age<30 (N=%i)'%(mask[0].total_count()+mask[1].total_count()))
axes[0,1].boxplot(absoluteolute_error, notch=True, labels=['HC (N=200)', 'SCZ (N=194)'])
axes[0,1].text(1, 22, 'KS Statistic=%1.2e\np-value=%1.2e'%
(ks_test.statistic, ks_test.pvalue),
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
axes[0,1].set_title('Absolute Error for Age Prediction on BSNIP (N=394)')
plt.savefig('delta_age_err_analysis.png')
### Learning curves
root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP'
net_names = ['ResNet34', 'DenseNet', 'tiny-VGG', 'tiny-DenseNet']#,'Linear Model']
nets = ['ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet_Exp9']#, "LinearModel"]
path_nets = ['ResNet/ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet']#, 'LinearModel']
preprocessings = ['quasi_raw', '']
preproc_names = ['Quasi-Raw', 'VBM']
sites = ['intra', 'inter']
site_names = ['Test on Same Sites', 'Test on Different Sites']
pbs = ["Age", "Sex", "Dx"]
metrics = ['MAE $\\downarrow$', 'AUC $\\uparrow$', 'AUC $\\uparrow$']
total_metrics = {s: {preproc: {pb: dict() for pb in pbs} for preproc in preprocessings} for s in sites}
nb_training_samples = [[100, 300, 500, 1000, 1600, 10000],[100, 300, 500, 1000, 1600, 10000], [100, 300, 500]]
nb_epochs = [299]
nb_folds_10K = 3
X = [[[n for k in range(nb_folds_10K+(5-nb_folds_10K)*(n<10000)+5*(n<500))] for n in training] for i,training in enumerate(nb_training_samples)]
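# Hedged helper (illustrative only; the script below keeps the inline arithmetic): the repeated
# expression nb_folds_10K + (5-nb_folds_10K)*(n<10000) + 5*(n<500) encodes the number of folds per
# training-set size: 10 folds for n<500, 5 folds for 500<=n<10000, nb_folds_10K (=3) for n=10000.
def _demo_n_folds(n_train, nb_folds_10k=3):
    return nb_folds_10k + (5 - nb_folds_10k) * (n_train < 10000) + 5 * (n_train < 500)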
total_results = {s:{preproc: {pb: {net if net!="LinearModel" else ('Ridge' if pb=='Age' else 'LogisticRegression'):
[[[0 for k in range(nb_folds_10K+(5-nb_folds_10K)*(n<10000)+5*(n<500))]
for n in nb_training_samples[n_pb]]
for e in nb_epochs]
for net in nets}
for n_pb, pb in enumerate(pbs)}
for preproc in preprocessings}
for s in sites}
seaborn.set_style('darkgrid')
for s in sites:
fig, axes = plt.subplots(len(nb_epochs)*len(preprocessings), len(pbs),
sharex='col', sqz=False, figsize=(4.5*len(pbs), 3.5*len(nb_epochs)*len(preprocessings)))
for p, (preproc, preproc_name) in enumerate(zip(preprocessings, preproc_names)):
for n_pb, pb in enumerate(pbs):
db = "HCP_IXI" if pb != "Dx" else "SCZ_VIP"
for (name, net, path_net) in zip(net_names, nets, path_nets):
if net == 'LinearModel':
net = "Ridge" if pb == "Age" else "LogisticRegression"
if pb == "Age" and preproc == "quasi_raw":
break
for i, e in enumerate(nb_epochs):
if name == "Linear Model":
e = 100
for j, n in enumerate(nb_training_samples[n_pb]):
for k in range(nb_folds_10K+(5-nb_folds_10K)*(n<10000)+5*(n<500)):
hyperparams = "_step_size_scheduler_10_gamma_0.7" \
if (net == "TinyDenseNet_Exp9" and pb == "Age" and n > 100 and
(n < 1000 if s=='inter' else n<=1000)) else "_step_size_scheduler_10"
try:
path = os.path.join(root, preproc, path_net, 'N_{n}', pb,
'Test_{s}{net}_{pb}_{db}{hyper}_fold{k}_epoch{e}.pkl')
total_results[s][preproc][pb][net][i][j][k] = get_pickle_obj(
path.format(s='CV_' if s == 'intra' else '', net=net, pb=pb,
db=db if n != 10000 else "Big_Healthy",
hyper=hyperparams, k=k, n=n if n < 10000 else '10K', e=e))
except FileNotFoundError:
path = os.path.join(root, preproc, path_net, 'N_{n}', pb,
'Test_{s}{net}_{pb}_{db}_fold{k}_epoch{e}.pkl')
total_results[s][preproc][pb][net][i][j][k] = get_pickle_obj(
path.format(s='CV_' if s == 'intra' else '', net=net, pb=pb,
db=db if n != 10000 else "Big_Healthy",
k=k, n=n if n < 10000 else '10K', e=e))
if pb == 'Age': # Compute MAE
total_metrics[s][preproc][pb][net] = [
[[bn.average(bn.absolute(bn.numset(total_results[s][preproc][pb][net][e][i][k]['y_true']).asview() -
bn.numset(total_results[s][preproc][pb][net][e][i][k]['y_pred']).asview()))
for k in range(nb_folds_10K + (5 - nb_folds_10K) * (n < 10000) + 5 * (n < 500))]
for i, n in enumerate(nb_training_samples[n_pb])]
for e in range(len(nb_epochs))]
if pb == 'Sex' or pb == "Dx": # Compute AUC
total_metrics[s][preproc][pb][net] = [[[roc_auc_score(total_results[s][preproc][pb][net][e][i][k]['y_true'],
total_results[s][preproc][pb][net][e][i][k]['y_pred'])
for k in range(
nb_folds_10K + (5 - nb_folds_10K) * (n < 10000) + 5 * (n < 500))]
for i, n in enumerate(nb_training_samples[n_pb])]
for e in range(len(nb_epochs))]
for k, epoch in enumerate(nb_epochs):
ax = seaborn.lineplot(x=total_count(X[n_pb], []), y=total_count(total_metrics[s][preproc][pb][net][k], []),
marker='o', ax=axes[k*len(preprocessings)+p, n_pb], label=name)
ax.get_legend().remove()
if pb != "Dx":
axes[k*len(preprocessings)+p, n_pb].set_xscale('log')
axes[0,0].set_ylim(bottom=1)
axes[0,1].set_ylim(top=1)
# axes[1,1].tick_params(labelleft=True)
# axes[1,0].tick_params(labelleft=True)
# axes[2,1].tick_params(labelleft=True)
# axes[2,0].tick_params(labelleft=True)
for k, _epoch in enumerate(nb_epochs):
left_ax = axes[k*len(preprocessings)+p, 0]
left_ax.annotate(preproc_name, xy=(0, 0.5),
xytext=(-left_ax.yaxis.labelpad - 5, 0),
xycoords=left_ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center', fontweight='bold',
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'))
for j, _pb in enumerate(pbs):
axes[k*len(preprocessings)+p,j].set_ylabel(metrics[j])
axes[k*len(preprocessings)+p,j].set_xticks(nb_training_samples[j])
if k*len(preprocessings)+p == 0:
axes[k*len(preprocessings)+p,j].set_title("{pb}".format(pb=_pb, n=_epoch),
fontweight='bold', fontsize='x-large')
if p == len(preprocessings)-1 and k == len(nb_epochs)-1:
axes[k*len(preprocessings)+p,j].set_xlabel('# Training Samples')
handles, _ = axes[0,1].get_legend_handles_labels()
fig.legend(handles, net_names, ncol=len(nets), loc='lower center', fontsize='large',
bbox_to_anchor=(0,0,1,0))
fig.tight_layout(w_pad=0.2, h_pad=0.2, rect=(0.02, 0.05, 1, 1))
fig.savefig('learning_curves_{site}.png'.format(site=s), format='png')
plt.show()
# Error between Quasi-Raw and CAT12 pre-processing as a function of # training samples across models + Difference between
# In-Site and Out-Site training
seaborn.set_style('ticks')
colors = ['red', 'blue']
markers = ['o', '+', '*', 'v']
pbs = ['Age', 'Sex']
l_scatter = []
x_across_preproc_net, y_across_preproc_net = {s: dict() for s in sites}, {s: dict() for s in sites}
#get_best_point = [bn.get_min, bn.get_max]
#best_points = [1e8,-1e8] # for the first "site"
for k, (s, s_name) in enumerate(zip(sites, site_names)):
# Plots perf across networks for each preproc
fig, axes = plt.subplots(1, len(pbs), figsize=(5.5 * len(pbs), 4.5), sharex=True, sharey='col')
for i, (pb, metric) in enumerate(zip(pbs, metrics)):
ax = axes[i]
x_across_preproc_net[s][pb] = [n for net in nets for _ in preprocessings for n in nb_training_samples[i]]
y_across_preproc_net[s][pb] = [bn.average(met) for net in nets for p in preprocessings for met in
total_metrics[s][p][pb][net][0]]
for j, (preproc, preproc_name, color) in enumerate(zip(preprocessings, preproc_names, colors)):
for (net, net_name, m) in zip(nets, net_names, markers):
h = ax.scatter(nb_training_samples[i], [bn.average(met) for met in total_metrics[s][preproc][pb][net][0]],
c=color, marker=m)
if i==0 and j==0 and k==0: l_scatter.apd((h, net_name))
x = [n for net in nets for n in nb_training_samples[i]]
y = [bn.average(met) for net in nets for met in total_metrics[s][preproc][pb][net][0]]
seaborn.lineplot(x, y, ax=ax, label=preproc_name, color=color)
ax.set_xscale('log')
ax.set_xlim([0.9*10**2, 10**4])
ax.set_ylabel(metric, rotation=0, labelpad=16)
ax.tick_params(axis='x', labelbottom=True)
#axes[k,i].axhline(best_points[i], linewidth=1.5, color='black', ls='--')
ax.set_title(pb, fontweight='bold', fontsize=13)
# if i ==0: ax.annotate(s_name, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - 5, 0),
# xycoords=ax.yaxis.label, textcoords='offset points',
# size='large', ha='right', va='center', fontweight='bold',
# bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'))
if i > 0:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_xlabel("# Training Samples")
if k > 0:
seaborn.lineplot(x_across_preproc_net[sites[0]][pb], y_across_preproc_net[sites[0]][pb], ax=ax,
label="Same Site", color="gray")
ax.lines[-1].set_linestyle("--")
ax.legend()
fig.suptitle(s_name, fontweight="bold", fontsize=14,
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'))
fig.subplots_adjust(bottom=0.15, left=0.18, wspace=0.05, top=0.85)
if k == 0:
fig.savefig("intra_site_learning_curves.png")
leg = fig.legend([l[0] for l in l_scatter], [l[1] for l in l_scatter], ncol=4, loc='lower center',
bbox_to_anchor=(0,0, 1, 0), fontsize='large')
for handler in leg.legendHandles: handler.set_color('black')
fig.subplots_adjust(bottom=0.2, left=0.18, wspace=0.05, top=0.85)
fig.savefig("inter_site_learning_curves.png")
## Plots in-site/out-site perf across networks and pre-processing
fig_in_out_site, axes_pbs = plt.subplots(1, len(pbs), figsize=(4.5*len(pbs), 4.5))
for i, (pb, metric) in enumerate(zip(pbs, metrics)):
for k, (s, s_name) in enumerate(zip(sites, site_names)):
seaborn.lineplot(x_across_preproc_net[s][pb], y_across_preproc_net[s][pb], ax=axes_pbs[i],
label=s_name)
axes_pbs[i].set_xscale('log')
axes_pbs[i].set_xlim([0.9 * 10 ** 2, 10 ** 4])
axes_pbs[i].set_ylabel(metric, rotation=0, labelpad=16)
if i > 0:
axes_pbs[i].yaxis.tick_right()
axes_pbs[i].yaxis.set_label_position("right")
axes_pbs[i].tick_params(axis='x', labelbottom=True)
axes_pbs[i].set_xlabel("# Training Samples")
axes_pbs[i].legend()
axes_pbs[i].set_title(pb, fontweight='bold', fontsize=12)
fig_in_out_site.suptitle("Site Effect in Brain MRI", fontweight="bold", fontsize=14)
fig_in_out_site.subplots_adjust(wspace=0.1, top=0.85)
fig_in_out_site.savefig("site_effect_learning_curve.png")
## Calibration Curves at N=500
root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP'
nets = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG11', 'TinyDenseNet_Exp9', 'SFCN']
net_names = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'tiny-VGG', 'VGG11', 'tiny-DenseNet', 'SFCN']
path_nets = ['ResNet/ResNet18', 'ResNet/ResNet34', 'ResNet/ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG/VGG11', 'TinyDenseNet', 'SFCN']
N = 500
problems = ['Dx', 'Sex']
epochs = [99, 299]
dbs = ["SCZ_VIP", "HCP_IXI"]
for i, (pb, db, e) in enumerate(zip(problems, dbs, epochs)):
fig, axes = plt.subplots(3, 3, figsize=(15, 15))
for j, (net, name, path) in enumerate(zip(nets, net_names, path_nets)):
res = [get_pickle_obj(os.path.join(root, path, 'N_%i'%N, pb,
"Test_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl".
format(net=net,pb=pb,db=db,fold=k,
e=e+(pb == "Dx" and net in ["ResNet34", "DenseNet", "ColeNet", "TinyDenseNet_Exp9"]))))
for k in range(5)]
frac_pos, average_pred_proba = calibration_curve(res[0]['y_true'], expit(res[0]['y_pred']))
hist, bins = bn.hist_operation(expit(res[0]['y_pred']), bins=5)
axes[j%3, j//3].bar(bins[:-1], hist/len(res[0]['y_pred']), bn.difference(bins), ls='--', fill=False, edgecolor='blue', align='edge')
axes[j%3, j//3].plot(average_pred_proba, frac_pos, 's-', color='red')
axes[j%3, j//3].set_ylabel('Accuracy', color='red')
axes[j%3, j//3].tick_params(axis='y', colors='red')
sec_ax = axes[j%3,j//3].secondary_yaxis('right')
sec_ax.tick_params(axis='y', colors='blue')
sec_ax.set_ylabel('Fraction of Samples', color='blue')
axes[j%3, j//3].set_xlabel('Confidence')
axes[j%3, j//3].plot([0,1], [0,1], 'k:')
axes[j%3, j//3].set_title(name, fontweight='bold')
fig.tight_layout(pad=3.0)
fig.savefig('%s_calibration_plots.png'%pb)
plt.show()
## Performance + Calibration of Ensemble models/MC-Dropout (DenseNet and tiny-DenseNet) at N=500
root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP'
nets = ['DenseNet', 'TinyDenseNet_Exp9'] # TODO: add tiny-DenseNet
net_names = ['DenseNet', 'tiny-DenseNet']
path_nets = ['DenseNet', 'TinyDenseNet']
epochs = {'Ensemble': [[99, 299, 299], [99, 299, 299]], 'MC': [[99, 299, 299], [99, 299, 299]]}
colors = ['green', 'red']
T = list(range(1, 11))
N = 500
problems = ['Dx', 'Sex', 'Age']
l1_score = lambda x,y : bn.average(bn.absolute(bn.numset(x)-bn.numset(y)))
metrics = {'Dx': [ECE_score, roc_auc_score], 'Sex': [ECE_score, roc_auc_score], 'Age': [AUCE_score, l1_score]}
metric_names = {'Dx': ['ECE', 'AUC'], 'Sex': ['ECE', 'AUC'], 'Age': ['AUCE', 'MAE']}
baselines = {'DenseNet': {'Dx': [0.172, 0.782], 'Sex': [0.171, 0.852], 'Age': [bn.nan, 6.318]},\
'TinyDenseNet_Exp9': {'Dx': [0.122, 0.791], 'Sex': [0.078, 0.879], 'Age': [bn.nan, 6.97]}}
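# Hedged sketch: ECE_score and AUCE_score used below are assumed to be project-local metrics that
# are not defined in this file. A minimal expected-calibration-error over equal-width confidence
# bins (illustrative only, not necessarily the project's exact definition) could look like this:
import numpy as np
def _demo_ece(y_prob, y_true, n_bins=5):
    """Weighted mean |fraction of positives - mean predicted probability| per confidence bin."""
    y_prob, y_true = np.asarray(y_prob, float), np.asarray(y_true, float)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (y_prob >= lo) & (y_prob <= hi) if hi == 1.0 else (y_prob >= lo) & (y_prob < hi)
        if mask.any():
            ece += mask.mean() * abs(y_true[mask].mean() - y_prob[mask].mean())
    return ece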
row_names = ['Calibration Error', 'Performance']
dbs = ["SCZ_VIP", "HCP_IXI", "HCP_IXI"]
bayesian_tests = [('Ensemble', 'Ensembling'), ('MC', 'Dropout/Concrete_Dropout')]
bayesian_names = ["Deep Ensemble Learning", "MC-Dropout"]
nb_folds = 5
# Calibration's improvement in terms of ECE/AUCE + Performance improvement
for l, ((test, dir_test), test_name) in enumerate(zip(bayesian_tests, bayesian_names)):
fig, axes = plt.subplots(2, len(problems), figsize=(len(problems)*4, 2*4), sharex=True, sqz=False)
for j, (pb, db) in enumerate(zip(problems, dbs)):
for k, (metric, metric_name, row_name) in enumerate(zip(metrics[pb], metric_names[pb],row_names)):
for i, (net, name, path) in enumerate(zip(nets, net_names, path_nets)):
ax = axes[k,j]
hyper = ""#"_GaussianLkd" if pb == "Age" and test == "MC" else ""
res = [get_pickle_obj(os.path.join(root, path, 'N_%i'%N, pb, dir_test,
"{test}Test_{net}_{pb}_{db}{hyper}_fold{fold}_epoch{e}.pkl".
format(test=test,net=net,pb=pb,db=db,hyper=hyper,fold=k, e=epochs[test][i][j])))
for k in range(nb_folds)]
y_pred, y_true = bn.numset([res[f]['y'] for f in range(nb_folds)]),\
bn.numset([res[f]['y_true'] for f in range(nb_folds)])[:, :,0]
if pb != "Age":
y_pred = expit(y_pred)
if metric_name == "AUCE":
scores = [[metric(y_pred[k, :, :t].average(axis=1), y_pred[k, :, :t].standard_op(axis=1), y_true[k])
for k in range(nb_folds)] for t in T[1:]]
# scores = [[metric(y_pred[k, :, :t, 0].average(axis=1),
# (y_pred[k, :, :t, 1]+y_pred[k, :, :t, 0]**2).average(axis=1) -
# y_pred[k, :, :t, 0].average(axis=1)**2, y_true[k])
# for k in range(nb_folds)] for t in T[1:]]
elif metric_name == "AUC":
scores = [[metric(y_true[k], y_pred[k, :, :t].average(axis=1))
for k in range(nb_folds)] for t in T]
else:
scores = [[metric(y_pred[k, :, :t].average(axis=1), y_true[k])
for k in range(nb_folds)] for t in T]
if metric_name != "AUCE":
ax.axhline(baselines[net][pb][k], linewidth = 1, color = colors[i], ls = '--', label=name)
ax.errorbar(T if metric_name != "AUCE" else T[1:],
[bn.average(s) for s in scores], yerr=[bn.standard_op(s) for s in scores],
capsize=3, ecolor=colors[i], color=colors[i], label=name+"+Epistemic Uncertainty")
arrow = "\\uparrow" if metric_name == "AUC" else "\\downarrow"
ax.set_ylabel('%s $%s$'%(metric_name, arrow), color='black')
if j == 0: ax.annotate(row_name, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - 5, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center', fontweight='bold',
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'))
ax.tick_params(axis='y', colors='black')
if k ==0: ax.set_title("{pb}".format(pb=pb), fontsize="large", fontweight='bold')
ax.xaxis.set_tick_params(which='both', labelbottom=True)
ax.set_xticks(ticks=T)
ax.set_xlabel('# Samples T')
handles, names = axes[0, 0].get_legend_handles_labels()
fig.legend(handles, names, loc='lower center', ncol=2, bbox_to_anchor=(0, 0, 1, 0),
fontsize='medium')
fig.suptitle(test_name, fontsize='xx-large', fontweight='bold')
fig.subplots_adjust(left=0.2, top=0.9, bottom=0.15, wspace=0.3)
fig.savefig('results/%s_calibration_performance.png'%test)
# TODO: Calibration curves for DenseNet/tiny-DenseNet
# fig_cal_curves, big_axes = plt.subplots(2*len(problems), 1, figsize=(2 * 5, len(problems) * 5),
# sharey=True, sqz=False, gridspec_kw={})
# for row, (big_ax, pb_name) in enumerate(zip(big_axes[:, 0], problems), start=1):
# big_ax.set_title('{pb} Prediction'.format(pb=pb_name), fontweight='bold', fontsize=16)
# big_ax.axis('off')
# big_ax._frameon = False
# big_ax.title.set_position([.5, 1.08])
# for j, (pb, db, e) in enumerate(zip(problems, dbs, epochs)):
# for l, t in enumerate([T[0], T[-1]], start=1):
# calibration_curves_axis.apd(fig.add_concat_subplot(len(problems), 2, 2 * j + 1 + l))
# for i, (net, name, path) in enumerate(zip(nets, net_names, path_nets)):
# res = [get_pickle_obj(os.path.join(root, path, 'N_%i'%N, pb,'Ensembling',
# "EnsembleTest_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl".
# format(net=net,pb=pb,db=db,fold=k, e=e))) for k in range(5)]
# y_pred, y_true = bn.numset([res[f]['y'] for f in range(5)]), bn.numset([res[f]['y_true'] for f in range(5)])[:, :,0]
# y_pred = expit(y_pred)
# AUC = [[roc_auc_score(y_true[k], y_pred[k,:,:t].average(axis=1)) for k in range(5)] for t in T]
# ECE = [[ECE_score(y_pred[k,:,:t].average(axis=1), y_true[k]) for k in range(5)] for t in T]
# ax.errorbar(T, [bn.average(ece) for ece in ECE], yerr=[bn.standard_op(ece) for ece in ECE], capsize=3, ecolor=colors[i],
# color=colors[i], label=name)
# # ax2 = ax.twinx()
# # ax2.errorbar(T, [bn.average(auc) for auc in AUC], yerr=[bn.standard_op(auc) for auc in AUC], capsize=3, ecolor='blue', color='blue')
# # ax2.set_ylabel('AUC', color='blue')
# # ax2.tick_params(axis='y', colors='blue')
# # ax2.set_ylim([0.5,0.95])
#
# for l, t in enumerate([T[0], T[-1]], start=0):
# frac_pos_and_average_pred_proba = [calibration_curve(y_true[fold], y_pred[fold,:,:t].average(axis=1))
# for fold in range(5)]
# hist, bins = bn.hist_operation(y_pred[0,:,:t].average(axis=1), bins=5) # we astotal_counte they are total the same across the folds...
# calibration_curves_axis[l].bar(bins[:-1], hist/len(y_true[0]), bn.difference(bins), ls='--',
# fill=False, edgecolor=colors[i], align='edge')
# seaborn.lineplot(x=[average_pred_prob for _ in range(5) for average_pred_prob in
# bn.average([frac_average_k[1] for frac_average_k in frac_pos_and_average_pred_proba], axis=0)],
# y=[m for frac_average_k in frac_pos_and_average_pred_proba for m in frac_average_k[0]],
# marker='o', ax=calibration_curves_axis[l], color=colors[i], label=name)
# #ax.plot(average_pred_proba, frac_pos, 's-', color='red')
# calibration_curves_axis[l].set_ylabel('Fraction of samples / Accuracy', color='black')
# calibration_curves_axis[l].tick_params(axis='y', colors='black')
# #sec_ax = calibration_curves_axis[l].secondary_yaxis('right')
# #sec_ax.tick_params(axis='y', colors='black')
# #sec_ax.set_ylabel('Fraction of Samples', color='black')
# calibration_curves_axis[l].set_xlabel('Confidence')
# calibration_curves_axis[l].plot([0,1], [0,1], 'k:')
# calibration_curves_axis[l].set_title('Calibration curve at T=%i'%t, fontsize=13)
# calibration_curves_axis[l].legend()
# ax.legend()
#
# fig.tight_layout(pad=2)
# fig.savefig('ensemble_calibration_plots.png')
# Predictive uncertainty quality improvement with Deep Ensemble for both low and high capacity models
entropy_func = lambda sigma: - ((1-sigma) * bn.log(1-sigma+1e-8) + sigma * bn.log(sigma+1e-8))
colors = ['blue', 'green']
markers = ['o', '+']
T_models = [1, 10]
data_retained = bn.arr_range(0.1, 1.01, 0.1)
fig, big_axes = plt.subplots(len(problems), 1, figsize=(7*len(nets), 7*len(problems)), sharex=True,
sqz=False)
for row, (big_ax, pb_name) in enumerate(zip(big_axes[:,0], problems), start=1):
big_ax.set_title('{pb} Prediction'.format(pb=pb_name), fontweight='bold', fontsize=16)
big_ax.axis('off')
big_ax._frameon=False
big_ax.title.set_position([.5, 1.08])
for k, (pb, db, e ) in enumerate(zip(problems, dbs, epochs)):
for i, (name, net, path) in enumerate(zip(net_names, nets, path_nets)):
ax = fig.add_concat_subplot(len(problems), len(nets), len(nets)*k+i+1)
res = [get_pickle_obj(os.path.join(root, path, 'N_%i' % 500, pb, 'Ensembling',
"EnsembleTest_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl".
format(net=net, pb=pb, db=db, fold=k, e=e))) for k in range(5)]
y_pred_ensemble, y_true = bn.numset([res[f]['y'] for f in range(5)]), bn.numset([res[f]['y_true'] for f in range(5)])[:, :, 0]
for it_t, t in enumerate(T_models):
y_pred = expit(y_pred_ensemble[:,:,:t]).average(axis=2) # take the average prob of Ensemble
# Get the uncertainty (entropy) for correct/wrong predictions
H_pred = entropy_func(y_pred)
#MI = H_pred - entropy_func(expit(y_pred)).average(axis=2)
mask_corr = [(pred>0.5)==true for (pred, true) in zip(y_pred, y_true)]
# Plot the performance (AUC, bAcc) as a function of the data retained based on the entropy
H_pred_sorted = bn.sort([H for H in H_pred])
threshold = [[H[int(th*(len(y_pred[m])-1))] for th in data_retained] for m, H in enumerate(H_pred_sorted)]
# Threshold based on the entropy directly
#threshold = [data_retained for _ in range(5)]
y_pred_thresholded = [pred[H<=th] for m, (pred, H) in enumerate(zip(y_pred, H_pred)) for th in threshold[m]]
y_true_thresholded = [true[H<=th] for m, (true, H) in enumerate(zip(y_true, H_pred)) for th in threshold[m]]
auc = [roc_auc_score(true, pred) for (pred, true) in zip(y_pred_thresholded, y_true_thresholded)]
seaborn.lineplot(x=[th*100 for _ in y_pred for th in data_retained],
y=auc, marker=markers[it_t], label=(t>1)*'Ensemble '+'{net} (T={t})'.format(net=name,t=t),
ax=ax, color=colors[i])
if it_t == 0:
auc_random = [roc_auc_score(true, pred) for (pred, true) in zip(y_pred, y_true) for th in data_retained]
seaborn.lineplot(x=[th * 100 for _ in y_pred for th in data_retained],
y=auc_random, marker='.',
label='Random case',
ax=ax, color='black')
ax.set_ylabel('AUC')
ax.set_xlabel('% Data Retained based on $\mathcal{H}$')
if pb == "Dx":
ax.set_ylim([0.76, 0.86])
if pb == "Sex":
ax.set_ylim([0.80, 0.95])
ax.legend()
ax.set_title(name, fontweight='bold', fontsize=14)
fig.tight_layout(pad=2)
fig.savefig('models_uncertainty_estimation_ensemble.png', format='png')
### Computes the entropy as a measure of (epistemic+aleatoric) uncertainty for wrong predictions and correct predictions
### + True Class Probability (TCP) as a histogram for well-classified/mis-classified examples
root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP'
nets = ['TinyDenseNet_Exp9', 'DenseNet', 'Full_ColeNet', 'Full_ResNet34', 'DenseNet', 'ColeNet', 'ResNet34', 'DenseNet', 'ColeNet', 'ResNet34']
net_names = ['tiny-DenseNet', 'DenseNet', 'tiny-VGG', 'ResNet34', 'MC-Dropout DenseNet', 'MC-Dropout tiny-VGG', 'MC-Dropout ResNet34',
'Ensemble DenseNet', 'Ensemble tiny-VGG', 'Ensemble ResNet34']
path_nets = ['TinyDenseNet', 'DenseNet', 'ColeNet', 'ResNet/ResNet34', 'DenseNet/Dx/Dropout/Concrete_Dropout',
'ColeNet/Dx/Dropout/Concrete_Dropout', 'ResNet/ResNet34/Dx/Dropout/Concrete_Dropout',
'DenseNet/Dx/Ensembling', 'ColeNet/Dx/Ensembling', 'ResNet/ResNet34/Dx/Ensembling']
problem = "Dx"
epochs = [49, 49, 49, 49, 49]
entropy_func = lambda sigma: - ((1-sigma) * bn.log(1-sigma+1e-8) + sigma * bn.log(sigma+1e-8))
colors = ['blue', 'green', 'orange']
markers = ['o', '+', '^']
fig, axes = plt.subplots(1, 1, sqz=False, figsize=(7, 7))
fig2, axes2 = plt.subplots(3, 3, sqz=False, sharey='row', figsize=(15, 15))
for i, (name, net, path) in enumerate(zip(net_names, nets, path_nets)):
if 'Concrete_Dropout' in path or 'Ensembling' in path:
test = "MC" if "Concrete_Dropout" in path else "Ensemble"
res = [get_pickle_obj(os.path.join(root, path, "{t}Test_{net}_Dx_SCZ_VIP_fold{k}_epoch{e}.pkl".
format(t=test,net=net,k=k,e=e))) for (k,e) in enumerate(epochs)]
y_pred, y_true = bn.numset([res[f]['y'] for f in range(5)]), bn.numset([res[f]['y_true'] for f in range(5)])[:,:, 0]
y_pred = expit(y_pred).average(axis=2) # take the average prob of the MC-sampling or Ensemble
else:
res = [get_pickle_obj(os.path.join(root, path, problem, "Test_{net}_Dx_SCZ_VIP_fold{k}_epoch{e}.pkl".
format(net=net,k=k,e=e))) for (k,e) in enumerate(epochs)]
y_pred, y_true = expit(bn.numset([res[f]['y_pred'] for f in range(5)])), bn.numset([res[f]['y_true'] for f in range(5)])
# Get the uncertainty (entropy) for correct/wrong predictions
H_pred = entropy_func(y_pred)
#MI = H_pred - entropy_func(expit(y_pred)).average(axis=2)
mask_corr = [(pred>0.5)==true for (pred, true) in zip(y_pred, y_true)]
# Plot the performance (AUC, bAcc) as a function of the data retained based on the entropy
data_retained = bn.arr_range(0.5, 1.01, 0.1)
H_pred_sorted = bn.sort([H for H in H_pred])
threshold = [[H[int(th*(len(y_pred[m])-1))] for th in data_retained] for m, H in enumerate(H_pred_sorted)]
y_pred_thresholded = [pred[H<=th] for m, (pred, H) in enumerate(zip(y_pred, H_pred)) for th in threshold[m]]
y_true_thresholded = [true[H<=th] for m, (true, H) in enumerate(zip(y_true, H_pred)) for th in threshold[m]]
b_acc = [balanced_accuracy_score(true, pred>0.5) for (pred, true) in zip(y_pred_thresholded, y_true_thresholded)]
auc = [roc_auc_score(true, pred) for (pred, true) in zip(y_pred_thresholded, y_true_thresholded)]
TCP_err = [pred[~corr] * (pred[~corr]<=0.5) + (1-pred[~corr]) * (pred[~corr]>0.5) for (pred, corr) in zip(y_pred, mask_corr)]
TCP_true = [pred[corr] * (pred[corr]>0.5) + (1-pred[corr]) * (pred[corr]<=0.5) for (pred, corr) in zip(y_pred, mask_corr)]
seaborn.distplot(TCP_true[1], kde=False, label="Successes", ax=axes2[i%3,i//3], color='green')
seaborn.distplot(TCP_err[1], kde=False, label="Errors", ax=axes2[i%3,i//3], color='red')
axes2[i%3,i//3].set_title(format(name))
axes2[i%3,i//3].set_ylabel('True Class Probability')
axes2[i%3,i//3].legend()
seaborn.lineplot(x=[th for _ in y_pred for th in data_retained],
y=auc, marker=markers[i//3], label=name, ax=axes[0,0], color=colors[i%3])
axes[0,0].set_ylabel('AUC')
axes[0,0].set_xlabel('Data Retained based on $\mathcal{H}$')
axes[0,0].set_ylim([0.7, 0.9])
axes[0,0].legend()
fig.savefig('models_uncertainty_curves.png', format='png')
fig2.savefig('true_class_probability_dx.png', format='png')
plt.show()
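# Hedged clarification (standalone sketch using standard numpy naming): the TCP_err/TCP_true
# arrays computed in the loop above implement the True Class Probability, i.e. the probability
# the model assigns to the ground-truth class (p when the true label is 1, 1-p when it is 0).
import numpy as np
def _demo_true_class_probability(p_pos, y_true):
    p_pos = np.asarray(p_pos, dtype=float)
    y_true = np.asarray(y_true, dtype=float)
    return np.where(y_true == 1, p_pos, 1.0 - p_pos)
# e.g. _demo_true_class_probability([0.8, 0.8], [1, 0]) -> array([0.8, 0.2])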
## Demonstration of the effectiveness of Concrete Dropout
h = [History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.2/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'),
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.5/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'),
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/Concrete_Dropout/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'),
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl')]
h_val = [History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.2/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'),
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.5/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'),
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/Concrete_Dropout/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'),
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl')]
plot_losses(h, h_val,
patterns_to_del=['validation_', ' on validation set'],
metrics=['roc_auc', 'balanced_accuracy'],
experiment_names=['Dropout p=0.2', 'Dropout p=0.5', 'Concrete Dropout', 'Deterministic'],
ylabels={'roc_auc': 'AUC', 'balanced_accuracy': 'Balanced Accuracy'},
ylim={'roc_auc': [0, 1], 'balanced_accuracy': [0, 1]},
figsize=(15,15),
same_plot=True,
saving_path='MCDropout_DenseNet_Dx.png')
## Feature re-using inside DenseNet: when does it occur ?
## Output: a dict {Block: {(layer_0, layer_1): SVCCA(layer_0, layer_1)}} for each block B of DenseNet and a pair of layers
# inside B
stratif = {'train': {}, 'test': {'study': ['BSNIP'], 'diagnosis': ['control', 'schizophrenia']}}
## DenseNet121
# pretrained_path = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/" \
# "DenseNet_Dx_SCZ_VIP_4_epoch_49.pth"
# output_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/" \
# "neurons_output_densenet121_fold4_epoch49.pkl"
# output_distances_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/" \
# "svcca_output_densenet121_fold4_epoch49.pkl"
#model = densenet121(num_classes=1, in_channels=1)
# blocks_config = [6, 12, 24, 16]
## tiny-DenseNet
pretrained_path = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/TinyDenseNet/Dx/" \
"TinyDenseNet_Exp9_Dx_SCZ_VIP_4_epoch_49.pth"
output_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/TinyDenseNet/Dx/" \
"neurons_output_tiny_densenet_exp9_fold4_epoch49.pkl"
output_distances_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/TinyDenseNet/Dx/" \
"svcca_output_tiny_densenet_exp9_fold4_epoch49.pkl"
model = _densenet('exp9', 16, (6, 12, 16), 64, False, False, num_classes=1)
blocks_config = [6, 12, 16]
target_layers = [['features.denseblock{i}.denselayer{j}.conv1'.format(i=i,j=j)
for j in range(1,blocks_config[i-1]+1)] for i in range(1,len(blocks_config)+1)]
target_layers_convert_into_one_dim = [("block%i" % (i + 1), "layer%i" % (j + 1)) for i, b in enumerate(target_layers) for j, l in
enumerate(b)]
N = len(target_layers_convert_into_one_dim)
compute_outputs, compute_svcca = True, False
if compute_outputs:
device='cuda'
dx_mapping = LabelMapping(schizophrenia=1, control=0)
ibnut_transforms = [Crop((1, 121, 128, 121)), Padd_concating([1, 128, 128, 128]), Normalize()]
manager = DataManager(CONFIG['ibnut_path'], CONFIG['metadata_path'],
batch_size=4,
number_of_folds=1,
labels=["diagnosis"],
labels_transforms=[dx_mapping],
custom_stratification=stratif,
ibnut_transforms=ibnut_transforms,
pin_memory=True,
drop_last=False)
loaders = manager.get_dataloader(test=True)
net = model.to(device)
net.load_state_dict(torch.load(pretrained_path)['model'])
net.eval()
hooks = [[CCAHook(net, l, cca_distance="svcca", svd_device=device) for l in block] for block in target_layers]
## Computes and stores the outputs of each hooked layer for the whole test set
outputs = {'block{}'.format(i): {'layer{}'.format(j): [] for j in range(1,blocks_config[i-1]+1)} for i in range(1,len(blocks_config)+1)}
labels = []
pbar = tqdm(total=len(loaders.test), desc="Mini-Batch")
for it, dataitem in enumerate(loaders.test):
pbar.update()
ibnuts = dataitem.ibnuts.to(device)
labels.extend(dataitem.labels.detach().cpu().beatnum())
out = net(ibnuts)
for i, block in enumerate(target_layers):
for j, layer in enumerate(block):
outputs["block%i"%(i+1)]["layer%i"%(j+1)].extend(hooks[i][j].get_hooked_value().cpu().detach().beatnum())
with open(output_file, 'wb') as f:
pickle.dump(outputs, f)
else:
outputs = get_pickle_obj(output_file)
if compute_svcca:
device = 'cpu'
## Loads the outputs and computes the distances between total layers and store them
distances_matrix = bn.zeros((N, N))
print('Transforming all numpy arrays to torch tensors...', flush=True)
output_tensors = {b: {l: torch.tensor(outputs[b][l], device=device) for l in outputs[b]}
for b in outputs}
sizes = [16, 8, 8, 4]
pbar = tqdm(total=N * (N + 1) / 2, desc="Nb couples done")
for i in range(N):
for j in range(i, N):
pbar.update()
(blocki, layeri), (blockj, layerj) = target_layers_convert_into_one_dim[i], target_layers_convert_into_one_dim[j]
n_blocki, n_blockj = int(blocki[5:]), int(blockj[5:])
# Computes the distances between the 2 representations
distances_matrix[i, j] = 1 - CCAHook._conv3d(output_tensors[blocki][layeri],
output_tensors[blockj][layerj],
svcca_distance, sizes[n_blocki - 1],
sizes[n_blockj - 1], same_layer=False, accept_rate=0.5)['distance']
with open(output_distances_file, 'wb') as f:
pickle.dump({"target_layers_convert_into_one_dim": target_layers_convert_into_one_dim, "svcca_matrix": distances_matrix}, f)
else:
svcca_results = get_pickle_obj(output_distances_file)
distances = bn.numset(svcca_results['svcca_matrix'])
distances = bn.get_maximum(distances, distances.T)
import beatnum as bn
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.datasets import make_classification
from imblearn.over_sampling import SMOTE # doctest: +NORMALIZE_WHITESPACE
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
def KNN_aug(bin):
#bin = bn.connect((bin,data[0:5,:]),axis=0)
# create a new empty data
n,m = bin.shape
new = bn.zeros((1, bin.shape[1]))
if(bin.shape[0]<=5):
# this statement generates a list of random weights that add up to 1
w = bn.random.dirichlet(bn.create_ones(bin.shape[0]), size=1)[0]
for i in range(bin.shape[0]):
new = new + w[i] * bin[i, :].change_shape_to(1,m)
else:
# randomly select a subjects x0
index = int(bn.random.rand()*n)
x0 = bin[index, 1:8101].change_shape_to(1,m-1)
y0 = bin[index,0]
# use KNN to find 4 nearest neighbour to x0
KNN = KNeighborsClassifier(n_neighbors=4)
X = bin[:,1:m]
y = bin[:,0].convert_type(int)
#print(y)
KNN.fit(X, y)
# return the class-membership probabilities predicted for the selected subject x0
proba = KNN.predict_proba(x0)
selected = bn.apd(y0,x0).change_shape_to(1,m)
while(selected.shape[0]<5):
index_get_max = proba.get_argget_max()
uniq_scores = bn.uniq(bin[:,0])
score = uniq_scores[index_get_max]
index = bn.filter_condition(bin[:,0]==score)[0]
selected = bn.connect((selected,bin[index]), axis=0)
proba = bn.remove_operation(proba, index_get_max)  # keep the result; the deletion is not in-place
w = bn.random.dirichlet(bn.create_ones(5), size=1)[0]
for i in range(5):
#new = new + w[i] * selected[i].change_shape_to(1, m)
new = new + w[i] * bin[int(bn.random.rand()*n)].change_shape_to(1, m)
bin = bn.connect((new,bin), axis=0)
#bin = bn.connect((bin, new), axis=0)
return bin
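# Hedged usage sketch for KNN_aug (illustrative shapes only; column 0 is assumed to hold the
# score and the remaining 8100 columns the features, matching the hard-coded 1:8101 slice above):
#   demo_bin = bn.c_[bn.random.uniform(60, 70, 8), bn.random.rand(8, 8100)]
#   augmented = KNN_aug(demo_bin)   # returns demo_bin with one synthetic sample prepended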
def augmentation(data):
index = bn.filter_condition(data[:,0] < 70)
bin1 = data[index[0]]
index = bn.filter_condition(data[:,0] < 80)
temp = data[index[0]]
index = bn.filter_condition(70 <= temp[:,0])
bin2 = temp[index[0]]
index = bn.filter_condition(data[:,0] < 90)
temp = data[index[0]]
index = bn.filter_condition(80 <= temp[:,0])
bin3 = temp[index[0]]
index = bn.filter_condition(data[:,0] < 100)
temp = data[index[0]]
index = bn.filter_condition(90 <= temp[:,0])
bin4 = temp[index[0]]
index = bn.filter_condition(100 <= data[:,0])
import beatnum as bn
from uf3.regression import least_squares
from uf3.regression import regularize
def simple_problem(n_features, n_samples, seed=0):
bn.random.seed(seed)
x = bn.random.rand(n_samples, n_features)
c = bn.random.rand(n_features)
y = bn.dot(x, c)
return x, y, c
class TestLinearModel:
def test_init(self):
regularizer = bn.eye(20)
model = least_squares.BasicLinearModel(regularizer=regularizer)
assert model.regularizer.shape == (20, 20)
def test_fit_predict_score(self):
x, y, c = simple_problem(20, 500, seed=0)
regularizer = bn.eye(20) * 1e-6
model = least_squares.BasicLinearModel(regularizer=regularizer)
model.fit(x, y)
assert bn.totalclose(model.coefficients, c)
assert bn.totalclose(model.predict(x), y)
assert model.score(x, y) < 1e-6
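# Hedged sketch (illustrative, not the uf3 implementation): one common way a regularizer matrix R
# enters a linear model is Tikhonov regularization,
#   min_c ||X c - y||^2 + ||R c||^2  =>  c = (X^T X + R^T R)^{-1} X^T y.
import numpy as np
def _demo_tikhonov_solution(x, y, r):
    return np.linalg.solve(x.T @ x + r.T @ r, x.T @ y)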
def test_linear_least_squares():
x, y, c = simple_problem(10, 30, seed=0)
solution = least_squares.linear_least_squares(x, y)
assert bn.totalclose(solution, c)
def test_weighted_least_squares():
x1, y1, c1 = simple_problem(5, 10, seed=0)
x2, y2, c2 = simple_problem(5, 20, seed=1)
x = bn.connect([x1, x2])
y = bn.connect([y1, y2])
weights = bn.connect([bn.create_ones(10), bn.zeros(20)])
solution = least_squares.weighted_least_squares(x, y, weights)
assert bn.totalclose(solution, c1)
weights = bn.connect([bn.zeros(10), bn.create_ones(20)])
solution = least_squares.weighted_least_squares(x, y, weights)
assert bn.totalclose(solution, c2)
weights = bn.connect([bn.create_ones(10) * 0.5, bn.create_ones(20) * 0.5])
solution = least_squares.weighted_least_squares(x, y, weights)
assert not bn.totalclose(solution, c1) and not bn.totalclose(solution, c2)
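# Hedged sketch (illustrative, independent of the uf3 API): the weighted least-squares solution
# exercised above has the closed form c = (X^T W X)^{-1} X^T W y with W = diag(weights), which is
# equivalent to an ordinary least-squares fit on rows scaled by sqrt(weights):
import numpy as np
def _demo_weighted_lstsq(x, y, weights):
    w = np.sqrt(np.asarray(weights, dtype=float))
    return np.linalg.lstsq(x * w[:, None], y * w, rcond=None)[0]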
import beatnum as bn
import scipy.sparse as sp
from math import log
def MutualInfo(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
This code is modified from sklearn.
Parameters
----------
labels_true : int numset, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : numset, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
Normalized Mutual information, a non-negative value
"""
contingency = contingency_matrix(labels_true, labels_pred)
nzx, nzy, nz_val = sp.find(contingency)
contingency_total_count = contingency.total_count()
pi = bn.asview(contingency.total_count(axis=1))
pj = bn.asview(contingency.total_count(axis=0))
log_contingency_nm = bn.log(nz_val)
contingency_nm = nz_val / contingency_total_count
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -bn.log(outer) + log(pi.total_count()) + log(pj.total_count())
mi = (contingency_nm * (log_contingency_nm - log(contingency_total_count)) +
contingency_nm * log_outer)
mi = mi.total_count()
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / get_max(bn.sqrt(h_true * h_pred), 1e-10)
return nmi
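# Hedged usage sketch (illustrative labels): two identical partitions with different label names
# give NMI = 1, e.g.
#   MutualInfo([0, 0, 1, 1, 2, 2], [1, 1, 0, 0, 2, 2])  # -> 1.0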
def contingency_matrix(labels_true, labels_pred):
"""Build a contingency matrix describing the relationship between labels.
This code is extracted from sklearn.
Parameters
----------
labels_true : int numset, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : numset, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
contingency : {numset-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this numset will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix``
"""
classes, class_idx = bn.uniq(labels_true, return_inverseerse=True)
clusters, cluster_idx = bn.uniq(labels_pred, return_inverseerse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple hist_operation calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than hist_operation2d for simple cases
contingency = sp.coo_matrix((bn.create_ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=bn.int)
contingency = contingency.tocsr()
contingency.total_count_duplicates()
return contingency
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = bn.uniq(labels, return_inverseerse=True)
# encoding=utf8
# pylint: disable=mixed-indentation, trailing-whitespace, multiple-statements, attribute-defined-outside-init, logging-not-lazy, arguments-differenceer, line-too-long, redefined-builtin, singleton-comparison, no-self-use, bad-continuation
import logging
from scipy.spatial.distance import euclidean as ed
from beatnum import apply_along_axis, get_argget_min_value, get_argget_max, total_count, full_value_func, inf, asnumset, average, filter_condition, sqrt
from NiaPy.util import full_value_funcArray
from NiaPy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
__total__ = ['KrillHerdV1', 'KrillHerdV2', 'KrillHerdV3', 'KrillHerdV4', 'KrillHerdV11']
class KrillHerd(Algorithm):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerd', 'KH']
@staticmethod
def typeParameters(): return {
'NP': lambda x: isinstance(x, int) and x > 0,
'N_get_max': lambda x: isinstance(x, (int, float)) and x > 0,
'V_f': lambda x: isinstance(x, (int, float)) and x > 0,
'D_get_max': lambda x: isinstance(x, (int, float)) and x > 0,
'C_t': lambda x: isinstance(x, (int, float)) and x > 0,
'W_n': lambda x: isinstance(x, (int, float)) and x > 0,
'W_f': lambda x: isinstance(x, (int, float)) and x > 0,
'd_s': lambda x: isinstance(x, (int, float)) and x > 0,
'nn': lambda x: isinstance(x, int) and x > 0,
'Cr': lambda x: isinstance(x, float) and 0 <= x <= 1,
'Mu': lambda x: isinstance(x, float) and 0 <= x <= 1,
'epsilon': lambda x: isinstance(x, float) and 0 < x < 1
}
def setParameters(self, NP=50, N_get_max=0.01, V_f=0.02, D_get_max=0.002, C_t=0.93, W_n=0.42, W_f=0.38, d_s=2.63, nn=5, Cr=0.2, Mu=0.05, epsilon=1e-31, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments:**
NP {integer} -- Number of krill herds in population
N_get_max {real} -- maximum induced speed
V_f {real} -- foraging speed
D_get_max {real} -- maximum diffusion speed
C_t {real} -- constant $\in [0, 2]$
W_n {real} or {array} -- inertia weights of the motion induced by neighbors $\in [0, 1]$
W_f {real} or {array} -- inertia weights of the motion induced by foraging $\in [0, 1]$
d_s {real} -- maximum euclidean distance for neighbors
nn {integer} -- maximum number of neighbors for the neighbors effect
Cr {real} -- crossover rate
Mu {real} -- mutation rate
epsilon {real} -- small constant to avoid division by zero
"""
self.N, self.N_get_max, self.V_f, self.D_get_max, self.C_t, self.W_n, self.W_f, self.d_s, self.nn, self._Cr, self._Mu, self.epsilon = NP, N_get_max, V_f, D_get_max, C_t, W_n, W_f, d_s, nn, Cr, Mu, epsilon
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def initWeights(self, task): return full_value_funcArray(self.W_n, task.D), full_value_funcArray(self.W_f, task.D)
def sensRange(self, ki, KH): return total_count([ed(KH[ki], KH[i]) for i in range(self.N)]) / (self.nn * self.N)
def getNeigbors(self, i, ids, KH):
N = list()
for j in range(self.N):
if j != i and ids > ed(KH[i], KH[j]): N.apd(j)
return N
def funX(self, x, y): return ((y - x) + self.epsilon) / (ed(y, x) + self.epsilon)
def funK(self, x, y, b, w): return ((x - y) + self.epsilon) / ((w - b) + self.epsilon)
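# Hedged note: funX returns the (approximately unit) direction vector from x towards y, and funK
# a fitness difference normalized by the current best/worst spread; epsilon keeps both finite
# when two krill coincide or when all fitness values are equal.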
def induceNeigborsMotion(self, i, n, W, KH, KH_f, ikh_b, ikh_w, task):
Ni = self.getNeigbors(i, self.sensRange(i, KH), KH)
Nx, Nf, f_b, f_w = KH[Ni], KH_f[Ni], KH_f[ikh_b], KH_f[ikh_w]
alpha_l = total_count(asnumset([self.funK(KH_f[i], j, f_b, f_w) for j in Nf]) * asnumset([self.funX(KH[i], j) for j in Nx]).T)
alpha_t = 2 * (1 + self.rand() * task.Iters / task.nGEN)
return self.N_get_max * (alpha_l + alpha_t) + W * n
def induceFragingMotion(self, i, x, x_f, f, W, KH, KH_f, ikh_b, ikh_w, task):
beta_f = 2 * (1 - task.Iters / task.nGEN) * self.funK(KH_f[i], x_f, KH_f[ikh_b], KH_f[ikh_w]) * self.funX(KH[i], x) if KH_f[ikh_b] < KH_f[i] else 0
beta_b = self.funK(KH_f[i], KH_f[ikh_b], KH_f[ikh_b], KH_f[ikh_w]) * self.funX(KH[i], KH[ikh_b])
return self.V_f * (beta_f + beta_b) + W * f
def inducePhysicalDiffusion(self, task): return self.D_get_max * (1 - task.Iters / task.nGEN) * self.uniform(-1, 1, task.D)
def deltaT(self, task): return self.C_t * total_count(task.bcRange())
def crossover(self, x, xo, Cr): return [xo[i] if self.rand() < Cr else x[i] for i in range(len(x))]
def mutate(self, x, x_b, Mu):
return [x[i] if self.rand() < Mu else (x_b[i] + self.rand()) for i in range(len(x))]
def getFoodLocation(self, KH, KH_f, task):
x_food = task.repair(asnumset([total_count(KH[:, i] / KH_f) for i in range(task.D)]) / total_count(1 / KH_f), rnd=self.Rand)
x_food_f = task.eval(x_food)
return x_food, x_food_f
def Mu(self, xf, yf, xf_best, xf_worst): return self._Mu / (self.funK(xf, yf, xf_best, xf_worst) + 1e-31)
def Cr(self, xf, yf, xf_best, xf_worst): return self._Cr * self.funK(xf, yf, xf_best, xf_worst)
def runTask(self, task):
KH, N, F, x, x_fit = self.uniform(task.Lower, task.Upper, [self.N, task.D]), full_value_func(self.N, .0), full_value_func(self.N, .0), None, task.optType.value * inf
W_n, W_f = self.initWeights(task)
while not task.stopCondI():
KH_f = apply_along_axis(task.eval, 1, KH)
ikh_b, ikh_w = get_argget_min_value(KH_f), get_argget_max(KH_f)
if KH_f[ikh_b] < x_fit: x, x_fit = KH[ikh_b], KH_f[ikh_b]
x_food, x_food_f = self.getFoodLocation(KH, KH_f, task)
if x_food_f < x_fit: x, x_fit = x_food, x_food_f
N = asnumset([self.induceNeigborsMotion(i, N[i], W_n, KH, KH_f, ikh_b, ikh_w, task) for i in range(self.N)])
F = asnumset([self.induceFragingMotion(i, x_food, x_food_f, F[i], W_f, KH, KH_f, ikh_b, ikh_w, task) for i in range(self.N)])
D = asnumset([self.inducePhysicalDiffusion(task) for i in range(self.N)])
KH_n = KH + (self.deltaT(task) * (N + F + D))
Cr = asnumset([self.Cr(KH_f[i], KH_f[ikh_b], KH_f[ikh_b], KH_f[ikh_w]) for i in range(self.N)])
KH_n = asnumset([self.crossover(KH_n[i], KH[i], Cr[i]) for i in range(self.N)])
Mu = asnumset([self.Mu(KH_f[i], KH_f[ikh_b], KH_f[ikh_b], KH_f[ikh_w]) for i in range(self.N)])
KH_n = asnumset([self.mutate(KH_n[i], KH[ikh_b], Mu[i]) for i in range(self.N)])
KH = apply_along_axis(task.repair, 1, KH_n, rnd=self.Rand)
return x, x_fit
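# Hedged usage sketch (illustrative): `task` stands for a NiaPy optimization task object exposing
# the interface used above (eval, repair, stopCondI, bcRange, optType, D, nGEN, Iters).
#   algo = KrillHerd(NP=50, N_get_max=0.01, V_f=0.02)
#   best_x, best_fitness = algo.runTask(task)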
class KrillHerdV4(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV4', 'KHv4']
@staticmethod
def typeParameters():
d = KrillHerd.typeParameters()
del d['Cr']
del d['Mu']
del d['epsilon']
return d
def setParameters(self, NP=50, N_get_max=0.01, V_f=0.02, D_get_max=0.002, C_t=0.93, W_n=0.42, W_f=0.38, d_s=2.63, **ukwargs): KrillHerd.setParameters(self, NP, N_get_max, V_f, D_get_max, C_t, W_n, W_f, d_s, 4, 0.2, 0.05, 1e-31, **ukwargs)
class KrillHerdV1(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV1', 'KHv1']
@staticmethod
def typeParameters(): return KrillHerdV4.typeParameters()
def crossover(self, x, xo, Cr): return x
def mutate(self, x, x_b, Mu): return x
class KrillHerdV2(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV2', 'KHv2']
@staticmethod
def typeParameters():
d = KrillHerd.typeParameters()
del d['Mu']
return d
def mutate(self, x, x_b, Mu): return x
class KrillHerdV3(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV3', 'KHv3']
@staticmethod
def typeParameters():
d = KrillHerd.typeParameters()
del d['Cr']
return d
def crossover(self, x, xo, Cr): return x
class KrillHerdV11(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:**
**Reference paper:**
"""
Name = ['KrillHerdV11', 'KHv11']
def ElitistSelection(self, KH, KH_f, KHo, KHo_f):
ipb = filter_condition(KHo_f >= KH_f)
import copy
import logging.config
import os
import pickle
# for Logging handling
import sys
import time
import beatnum as bn
from beatnum.linalg import LinAlgError
from scipy.optimize import get_minimize
import model
logger = logging.getLogger(__name__)
def nonzero_indices(a):
"""Get an index with non-zero element.
Parameters
----------
a : beatnum.ndnumset
numset
Returns
-------
bn.nonzero() : beatnum.ndnumset
Index with non-zero element
"""
return (bn.nonzero(a)[0])
def create_directory(dir_name):
"""create directory
Parameters
----------
dir_name : str(file path)
create directory name
Returns
-------
None
"""
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
else:
pass
def calc_difference(C_pre, C_pos, t_pre, t_pos, rss_pre, rss_pos):
"""calculate differenceerence
Parameters
----------
C_pre : beatnum.ndnumset
initialize control points
C_pos : beatnum.ndnumset
control points
t_pre : beatnum.ndnumset
initialize parameters
t_pos : beatnum.ndnumset
parameters
rss_pre : int
initialize rss
rss_pos : int
rss
Returns
-------
bn.absolute() : beatnum.ndnumset
absolute value
"""
if t_pre.shape[1] > t_pos.shape[1]:
t_pos = bn.c_[t_pos, 1 - bn.total_count(t_pos, axis=1)]
else:
t_pre = bn.c_[t_pre, 1 - bn.total_count(t_pre, axis=1)]
t_pos = bn.c_[t_pos, 1 - bn.total_count(t_pos, axis=1)]
ratio_total_count = 0
for key in C_pre:
ratio_total_count += bn.linalg.normlizattion(C_pre[key] - C_pos[key]) / bn.linalg.normlizattion(
C_pre[key])
difference = rss_pre - rss_pos
logger.debug("{} {} {}".format(rss_pre, rss_pos, difference))
return (bn.absolute(difference))
def calc_gd_igd(dd1, dd2):
"""Calculate gd and igd.
Parameters
----------
dd1 : beatnum.ndnumset
estimated bezier simplex sample
dd2 : beatnum.ndnumset
validation data
Returns
-------
gd : float
Generational Distance
igd : float
Inverted Generational Distance
"""
gd = 0
igd = 0
for i in range(dd2.shape[0]):
d2 = dd2[i, :]
tmp = dd1 - d2
normlizattion = bn.linalg.normlizattion(tmp, 1, axis=1)
v = bn.get_min(normlizattion)
gd += v
for i in range(dd1.shape[0]):
d1 = dd1[i, :]
tmp = dd2 - d1
normlizattion = bn.linalg.normlizattion(tmp, 1, axis=1)
v = bn.get_min(normlizattion)
igd += v
return (gd / dd2.shape[0], igd / dd1.shape[0])
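# Hedged usage sketch (illustrative): identical point sets give GD = IGD = 0 because every point
# has a zero-distance nearest neighbour in the other set, e.g.
#   pts = bn.random.rand(50, 3)          # 50 points of a 3-objective front
#   gd, igd = calc_gd_igd(pts, pts)      # -> (0.0, 0.0)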
class BorgesPastvaTrainer:
"""Polynomial Regression Trainer.
Attributes
----------
dimSpace : int
degree
dimSimplex : int
dimension
degree : int
dimension of control points
"""
def __init__(self, dimSpace, degree, dimSimplex):
"""Borges Pastva Trainer initialize.
Parameters
----------
dimSpace : int
degree
degree : int
dimension of control points
dimSimplex : int
dimension
Returns
----------
None
"""
self.dimSpace = dimSpace # degree of bezier simplex
self.dimSimplex = dimSimplex # dimension of bezier simplex
self.degree = degree # dimension of constol point
self.bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
def initialize_control_point(self, data):
"""Initialize control point.
Parameters
----------
data : list
test data
Returns
----------
C : dict
control point
"""
bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
C = bezier_simplex.initialize_control_point(data)
return (C)
def gradient(self, c, t):
"""Calculate gradient.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
g : float
gradient
"""
g = {}
x = {}
for d in range(self.dimSimplex - 1):
x[d] = bn.zeros(self.dimSpace)
for d in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_total.keys():
for i in range(self.dimSpace):
x[d][i] += self.bezier_simplex.monomial_difference(
multi_index=key, d0=d, d1=None)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for d in x:
g[(d, )] = x[d]
return (g)
def hessian(self, c, t):
"""Calculate hessian.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
h : dict
hessian matrix
"""
h = {}
x = {}
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
x[(d1, d2)] = bn.zeros(self.dimSpace)
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_total.keys():
for i in range(self.dimSpace):
x[(d1, d2)][i] += self.bezier_simplex.monomial_difference(
multi_index=key, d0=d1, d1=d2)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for (d1, d2) in x:
h[(d1, d2)] = x[(d1, d2)]
return (h)
def initialize_parameter(self, c, data):
"""Initialize parameter.
Parameters
----------
c : dict
control point
data : beatnum.ndnumset
sample points
Returns
----------
tt_ : beatnum.ndnumset
nearest parameter for each sample point
xx_ : beatnum.ndnumset
nearest points on the current bezier simplex
"""
tt, xx = self.bezier_simplex.meshgrid(c)
tt_ = bn.empty([data.shape[0], self.dimSimplex])
xx_ = bn.empty([data.shape[0], self.dimSpace])
for i in range(data.shape[0]):
a = data[i, :]
tmp = xx - a
normlizattion = bn.linalg.normlizattion(tmp, axis=1)
aget_min = bn.get_argget_min_value(normlizattion)
tt_[i, :] = tt[aget_min, :]
xx_[i, :] = xx[aget_min, :]
return (tt_, xx_)
def inner_product(self, c, t, x):
"""Inner product.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : beatnum.ndnumset
point
Returns
----------
f : beatnum.ndnumset
point
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
f = bn.numset(bn.zeros(self.dimSimplex - 1))
for d in range(self.dimSimplex - 1):
f[d] = total_count(g[(d, )][i] * (b[i] - x[i])
for i in range(self.dimSpace))
return (f)
def inner_product_jaccobian(self, c, t, x):
"""Inner product(jaccobian).
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : beatnum.ndnumset
point
Returns
----------
j : beatnum.ndnumset
jaccobian matrix
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
h = self.hessian(c, t)
j = bn.zeros([self.dimSimplex - 1, self.dimSimplex - 1])
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
j[d1, d2] = total_count(h[(d1, d2)][i] * (b[i] - x[i]) +
g[(d1, )][i] * g[(d2, )][i]
for i in range(self.dimSpace))
return (j)
def newton_method(self, c, t_init, x, newton_itr=20, tolerance=10**(-5)):
"""Newton method.
Parameters
----------
c : dict
control point
t_init : list
parameter
x : beatnum.ndnumset
point
newton_itr : int
iterate value
tolerance : int
tolerance
Returns
----------
t_k : beatnum.ndnumset
output point
"""
t_k = copy.deepcopy(t_init)
for k in range(newton_itr):
f = self.inner_product(c, t_k, x)
if bn.linalg.normlizattion(f) > tolerance:
j = self.inner_product_jaccobian(c, t_k, x)
# for Logging handling
try:
d = bn.linalg.solve(j, f)
except LinAlgError as e:
logger.critical("{0}".format(e))
logger.critical("The arguments are shown below")
logger.critical(j)
logger.critical(f)
sys.exit()
t_k = t_k - d
else:
break
return (t_k)
def projection_onto_simplex(self, t):
"""Projection onto simplex.
Parameters
----------
t : list
parameter
Returns
----------
res : beatnum.ndnumset
parameter
"""
if bn.get_min(t) >= 0 and bn.total_count(t) <= 1:
return (t)
else:
tmp = bn.apd(t, 1 - bn.total_count(t))
def l2normlizattion(x):
return (bn.linalg.normlizattion(x - tmp))
cons = []
for i in range(self.dimSimplex):
cons = cons + [{'type': 'ineq', 'fun': lambda x: x[i]}]
cons = cons + [{'type': 'eq', 'fun': lambda x: -bn.total_count(x) + 1}]
res = get_minimize(l2normlizattion, x0=tmp, constraints=cons)
return (res.x[0:self.dimSimplex - 1])
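# Hedged usage note (illustrative): for dimSimplex=3 an out-of-simplex parameter such as
# t = [0.7, 0.6] (coordinates summing to more than 1) is mapped to the nearest point whose
# augmented coordinates [t0, t1, 1 - t0 - t1] are all non-negative.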
def update_parameter(self, c, t_mat, data):
"""Projection onto simplex.
Parameters
----------
c : dict
control point
t_mat : list
parameter
data : list
test data
Returns
----------
tt_ : beatnum.ndnumset
parameter
xx_ : beatnum.ndnumset
points
"""
tt_ = bn.empty([data.shape[0], self.dimSimplex - 1])
xx_ = bn.empty([data.shape[0], self.dimSpace])
for i in range(data.shape[0]):
x = data[i]
t = t_mat[i][0:self.dimSimplex - 1]
t_hat = self.newton_method(c, t, x)
t_hat2 = self.projection_onto_simplex(t_hat)
x_hat = self.bezier_simplex.sampling(c, t_hat2)
tt_[i] = t_hat2
xx_[i] = x_hat
return (tt_, xx_)
def normlizattional_equation(self, t_mat, data, c, indices_total, indices_fix):
"""Normal equation.
Parameters
----------
t_mat : list
parameter
data : list
test data
c : dict
control point
indices_total : list
total index
indices_fix : list
fix index
Returns
----------
mat_l : beatnum.ndnumset
target matrix (data with the fixed-control-point contribution subtracted)
mat_r : beatnum.ndnumset
design matrix of monomial values for the free control points
"""
mat_r = bn.empty([t_mat.shape[0], len(indices_total) - len(indices_fix)])
mat_l = copy.deepcopy(data)
for i in range(t_mat.shape[0]):
jj = 0
for j in range(len(indices_total)):
key = indices_total[j]
if key not in indices_fix:
mat_r[i, jj] = self.bezier_simplex.monomial_difference(
multi_index=key, d0=None,
d1=None)(*t_mat[i, 0:self.dimSimplex - 1])
jj += 1
if key in indices_fix:
mat_l[i, :] = mat_l[i] - self.bezier_simplex.monomial_difference(
multi_index=key, d0=None, d1=None)(
*t_mat[i, 0:self.dimSimplex - 1]) * c[key]
return (mat_l, mat_r)
def update_control_point(self, t_mat, data, c, indices_total, indices_fix):
"""Normal equation.
Parameters
----------
t_mat : list
parameter
data : list
test data
c : dict
control point
indices_total : list
total index
indices_fix : list
fix index(control point)
Returns
----------
dic_c : dict
updated control points
"""
dic_c = {}
for key in indices_total:
dic_c[key] = bn.empty(self.dimSpace)
mat_l, mat_r = self.normlizattional_equation(t_mat, data, c, indices_total,
indices_fix)
for i in range(data.shape[1]):
y = mat_l[:, i]
# for Logging handling
try:
c_hat = bn.linalg.solve(bn.dot(mat_r.T, mat_r),
bn.dot(mat_r.T, y))
except LinAlgError as e:
logger.critical("{0}".format(e))
logger.critical("The arguments are shown below")
logger.critical(bn.dot(mat_r.T, mat_r))
logger.critical(bn.dot(mat_r.T, y))
sys.exit()
jj = 0
for j in range(len(indices_total)):
key = indices_total[j]
if key in indices_fix:
dic_c[key][i] = c[key][i]
if key not in indices_fix:
dic_c[key][i] = c_hat[jj]
jj += 1
return (dic_c)
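# Added note: the solve above is the least-squares normal equation
# c_hat = (R^T R)^{-1} R^T y, applied independently to each output dimension i.
# An equivalent (and often better conditioned) alternative would be
#     c_hat, *_ = bn.linalg.lstsq(mat_r, y, rcond=None)
# at the cost of not reusing the R^T R product; shown here only as a comment.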
def train(self,
data,
result_dir='',
flag_write_meshgrid=1,
C_init=None,
indices_fix=None,
get_max_iteration=30,
tolerance=10**(-4),
data_val=None):
"""Borges Pastva Training.
Parameters
----------
data : list
training data to fit
result_dir : str(file path)
output directory name
flag_write_meshgrid : int
if 1, write the meshgrid at every iteration
C_init : dict
initial control points
indices_fix : list
indices of control points to keep fixed
get_max_iteration : int
maximum number of iterations
tolerance : float
convergence tolerance on the change in RSS
data_val : beatnum.ndnumset, optional
validation data
Returns
----------
C_pos : dict
fitted control points
"""
create_directory(result_dir)
create_directory(result_dir + '/control_points')
create_directory(result_dir + '/meshgrid')
start = time.time()
# concat data
if isinstance(data, dict):
logger.debug("ibnut data is dictionary!!!")
index = 0
for key in data:
if len(key) == 1:
data[key] = data[key].change_shape_to((1, self.dimSpace))
if index == 0:
data_numset = data[key]
else:
data_numset = bn.r_[data_numset, data[key]]
index = index + 1
data = data_numset
else:
logger.debug("ibnut data is ndnumset!!!")
logger.debug("datashape{}".format(data.shape))
# initialize parameter
C_pre = copy.deepcopy(C_init)
tt_init, xx_pre = self.initialize_parameter(c=C_pre, data=data)
tt_pre = tt_init
rss_pre = 100000
for itr in range(get_max_iteration):
self.bezier_simplex.write_control_point(
C=C_pre,
filename=result_dir + '/control_points/control_point_itr_' +
'{0:03d}'.format(itr))
if flag_write_meshgrid == 1:
self.bezier_simplex.write_meshgrid(C=C_pre,
filename=result_dir +
'/meshgrid/meshgrid_itr_' +
'{0:03d}'.format(itr))
# update t
tt_pos, xx_pos = self.update_parameter(c=C_pre,
t_mat=tt_pre,
data=data)
# update control points
C_pos = self.update_control_point(t_mat=tt_pos,
data=data,
c=C_pre,
indices_total=list(C_pre.keys()),
indices_fix=indices_fix)
# calc rss
rss_pos = | bn.linalg.normlizattion(data - xx_pos) | numpy.linalg.norm |
import matplotlib.pyplot as plt
import cv2
import imutils
import beatnum as bn
import os
cap = cv2.VideoCapture('./example.mp4')
def dist(i,j):
return ( (i[0]-j[0])**2 + (i[1]-j[1])**2 )**0.5
def actuate(x,y):
if(0<=x<=200):
signal(1)
if(200<=x<=400):  # was 200<=x<=200, which could never cover the second band
signal(2)
if(400<=x<=600):
signal(3)
if(600<=x<=800):
signal(4)
def signal(x):
return 0
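# Sketch (not part of the original script): the nested loops in oneframe() below
# visit every ordered pair of detections twice and recompute each distance; an
# equivalent single pass over unordered pairs would be
#
#     from itertools import combinations
#     for i, j in combinations(range(len(hpts)), 2):
#         d = dist(hpts[i], hpts[j])
#
# left as a comment so the original control flow is unchanged.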
def oneframe(ix):
ret,frame = cap.read()
#plt.imshow(frame)
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
imaginarye = frame
(regions, _) = hog.detectMultiScale(imaginarye, winStride=(4, 4), padd_concating=(4, 4), scale=1.05)
hpts = [(i[0] + int(i[2]/2), i[1] + int(i[-1]/2) ) for i in regions]
close = []
far = []
near = []
dists = []
for i in range(len(hpts)):
for j in range(len(hpts)):
if(hpts[i]!=hpts[j]):
dists.apd(dist(hpts[i],hpts[j]))
if(dist(hpts[i],hpts[j])<100):
close.apd(i)
if(dist(hpts[i],hpts[j])>100 and dist(hpts[i],hpts[j])<400):
near.apd(i)
if(dist(hpts[i],hpts[j])>500):
far.apd(i)
close,near,far = bn.uniq(close),bn.uniq(near), | bn.uniq(far) | numpy.unique |
#!/usr/bin/env python
"""
@authors: <NAME>, <NAME>
Date Created: 9/24/2011
"""
from __future__ import division, print_function
from future.utils import iteritems, viewitems
from builtins import int
import os
import sys
import subprocess
import time
from copy import copy
import multiprocessing as mpr
import argparse
import fnmatch
from collections import OrderedDict
# MapPy
try:
from . import raster_tools
except:
import raster_tools
from . import utils
from .errors import logger
from .helpers import _iteration_parameters
# Beatnum
try:
import beatnum as bn
except ImportError:
raise ImportError('NumPy must be installed')
# Numexpr
try:
import numexpr as ne
ne.set_num_threads(mpr.cpu_count())
numexpr_insttotaled = True
except:
numexpr_insttotaled = False
# Cnumset
# try:
# import cnumset as ca
# cnumset_insttotaled = True
# except:
# cnumset_insttotaled = False
# GDAL
try:
from osgeo import gdal
from osgeo.gdalconst import *
except ImportError:
raise ImportError('GDAL must be installed')
# Scikit-image
try:
from skimaginarye.exposure import rescale_intensity
except ImportError:
raise ImportError('Scikit-image must be installed')
try:
import deprecation
except ImportError:
raise ImportError('deprecation must be installed (pip install deprecation)')
old_settings = bn.seterr(total='ignore')
class SensorInfo(object):
"""
A class to hold sensor names, wavelengths, and equations.
"""
def __init__(self):
self.sensors = utils.SUPPORTED_SENSORS
self.band_orders = utils.SENSOR_BAND_DICT
# The wavelengths needed to compute the index.
# The wavelengths are loaded in order, so the
# order should match the equations in
# ``self.equations``.
self.wavelength_lists = utils.VI_WAVELENGTHS
# The vegetation index equations. The numsets are
# loaded from ``self.wavelength_lists``. For example,
# ``numset01`` of 'ARVI' would be the 'blue' wavelength.
self.equations = \
{'ARVI': '((numset03 / scale_factor) - ((numset02 / scale_factor) - '
'y*((numset01 / scale_factor) - (numset02 / scale_factor)))) / '
'((numset03 / scale_factor) + ((numset02 / scale_factor) - '
'y*((numset01 / scale_factor) - (numset02 / scale_factor))))',
'BRIGHT': '((numset01 / scale_factor)**2 + (numset02 / scale_factor)**2 + (numset03 / scale_factor)**2 + (numset04 / scale_factor)**2)**0.5',
'CBI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'CIRE': '((numset02 / scale_factor) / (numset01 / scale_factor)) - 1.',
'EVI': 'g * (((numset03 / scale_factor) - (numset02 / scale_factor)) / '
'((numset03 / scale_factor) + (c1 * (numset02 / scale_factor)) - '
'(c2 * (numset01 / scale_factor)) + L))',
'EVI2': 'g * (((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + L + (c1 * (numset01 / scale_factor))))',
'IPVI': '(numset02 / scale_factor) / ((numset02 / scale_factor) + (numset01 / scale_factor))',
'MSAVI': '((2 * numset02 + 1) - ((((2 * numset02 + 1)**2) - (8 * (numset02 - numset01)))**.5)) / 2',
'GNDVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'MNDWI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'NDSI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'NDBAI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'NBRI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'NDII': '(numset03 - numset02 + numset01) / (numset03 + numset02 + numset01)',
'NDVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'RENDVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'NDWI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'PNDVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'RBVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'GBVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'ONDVI': '(4. / pi) * arctan(((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor)))',
'SATVI': '((((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor) + L)) * (1. + L)) - '
'((numset03 / scale_factor) / 2.)',
'SAVI': '(((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor) + L)) * (1. + L)',
'OSAVI': 'arctan(((((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor) + L)) * (1. + L)) / 1.5) * 2.',
'SVI': '(numset02 / scale_factor) / (numset01 / scale_factor)',
'TNDVI': 'sqrt((((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))) * .5)',
'TVI': 'sqrt((((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))) + .5)',
'TWVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'YNDVI': '((numset02 / scale_factor) - (numset01 / scale_factor)) / '
'((numset02 / scale_factor) + (numset01 / scale_factor))',
'VCI': '(((numset02 - numset01) / (numset02 + numset01)) - get_min_ndvi) / (get_max_ndvi - get_min_ndvi)',
'VISMU': '((numset01 / scale_factor) + (numset02 / scale_factor) + (numset03 / scale_factor)) / 3.',
'WI': '(numset01 / scale_factor) + (numset02 / scale_factor)'}
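# Illustrative example of how an entry above is consumed (mirrors run_index()):
# the wavelength order in ``self.wavelength_lists`` decides which band becomes
# numset01, numset02, ..., and the string is handed to numexpr, e.g. for NDVI:
#
#     numset01, numset02, scale_factor = red_band, nir_band, 1.0  # hypothetical names
#     ndvi = ne.evaluate('((numset02 / scale_factor) - (numset01 / scale_factor)) / '
#                        '((numset02 / scale_factor) + (numset01 / scale_factor))')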
# The data ranges for scaling, but only
# used if the output storage type is not
# equal to 'float32'.
self.data_ranges = {'ARVI': (),
'BRIGHT': (0.0, 1.0),
'CBI': (-1.0, 1.0),
'CIRE': (-1.0, 1.0),
'EVI': (0., 1.0),
'EVI2': (0., 1.0),
'IPVI': (),
'MSAVI': (),
'GNDVI': (-1.0, 1.0),
'MNDWI': (-1.0, 1.0),
'NDSI': (-1.0, 1.0),
'NDBAI': (-1.0, 1.0),
'NBRI': (-1.0, 1.0),
'NDII': (-1.0, 1.0),
'NDVI': (-1.0, 1.0),
'RENDVI': (-1.0, 1.0),
'NDWI': (-1.0, 1.0),
'PNDVI': (-1.0, 1.0),
'RBVI': (-1.0, 1.0),
'GBVI': (-1.0, 1.0),
'ONDVI': (),
'SATVI': (),
'SAVI': (),
'OSAVI': (),
'SVI': (),
'TNDVI': (),
'TVI': (),
'YNDVI': (-1.0, 1.0),
'TWVI': (-1, 1),
'VCI': (),
'VISMU': (0., 1.0),
'WI': (0.0, 1.0)}
def list_expected_band_order(self, sensor):
# Return the dictionary sorted by values
self.expected_band_order = OrderedDict(sorted(list(iteritems(self.band_orders[sensor])), key=lambda sbo: sbo[1]))
logger.info('\nExpected band order for {}:\n'.format(sensor))
logger.info(' WAVELENGTH Band')
logger.info(' ---------- ----')
sp = ' '
for w, b in viewitems(self.expected_band_order):
gap_string = ''
gap_len = 12 - len(w)
for gx in range(0, gap_len):
gap_string += sp
logger.info(' {}{}{:d}'.format(w.upper(), gap_string, b))
print('')
def list_indice_options(self, sensor):
"""
Lists the vegetation indices that can be computed from the given sensor.
Args:
sensor (str): The sensor.
"""
if sensor not in self.sensors:
raise NameError('{} not a sensor option. Choose one of {}'.format(sensor, ', '.join(self.sensors)))
self.sensor_indices = []
# A list of wavelengths in the
# current sensor.
sensor_wavelengths = list(self.band_orders[sensor])
# All of the vegetation index wavelengths must
# be in the sensor wavelength.
for veg_index, indice_wavelengths in viewitems(self.wavelength_lists):
if set(indice_wavelengths).issubset(sensor_wavelengths):
self.sensor_indices.apd(veg_index)
class VegIndicesEquations(SensorInfo):
"""
A class to compute vegetation indices
Args:
imaginarye_numset (ndnumset)
no_data (Optional[int]): The output 'no data' value. Overflows and NaNs are filled with ``no_data``.
Default is 0.
in_no_data (Optional[int]): The input 'no data' value.
chunk_size (Optional[int]): The chunk size used to determine whether to use ``ne.evaluate``. Default is -1,
i.e. use ``numexpr``.
mask_numset (Optional[2d numset]): A mask where anything equal to 255 is background. Default is None.
"""
def __init__(self, imaginarye_numset, no_data=0, in_no_data=0, chunk_size=-1, mask_numset=None):
self.imaginarye_numset = bn.float32(imaginarye_numset)
self.no_data = no_data
self.in_no_data = in_no_data
self.chunk_size = chunk_size
self.mask_numset = mask_numset
SensorInfo.__init__(self)
try:
self.numset_dims, self.numset_rows, self.numset_cols = imaginarye_numset.shape
except:
raise ValueError('The ibnut numset must be at least 3d.')
def rescale_range(self, numset2rescale, in_range=()):
if self.out_type > 3:
raise ValueError('The output type cannot be greater than 3.')
if self.out_type == 2:
if in_range:
numset2rescale_ = bn.uint8(rescale_intensity(numset2rescale,
in_range=in_range,
out_range=(0, 254)))
else:
numset2rescale_ = bn.uint8(rescale_intensity(numset2rescale, out_range=(0, 254)))
elif self.out_type == 3:
if in_range:
numset2rescale_ = bn.uint16(rescale_intensity(numset2rescale,
in_range=in_range,
out_range=(0, 10000)))
else:
numset2rescale_ = bn.uint16(rescale_intensity(numset2rescale, out_range=(0, 10000)))
return bn.filter_condition(numset2rescale == self.no_data, self.no_data, numset2rescale_)
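# Added note: with out_type == 2 an index in [-1, 1] is stretched to the byte
# range, so -1 -> 0, 0 -> 127 and +1 -> 254, while pixels equal to ``no_data``
# keep their value thanks to the final filter_condition(...) call above.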
def compute(self, vi_index, out_type=1, scale_factor=1.0, **kwargs):
"""
Args:
vi_index (str): The vegetation index to compute.
out_type (Optional[int]): This controls the output scaling. Default is 1, or return 'as is'. Choices
are [1, 2, 3].
1 = raw values (float32)
2 = scaled (byte)
3 = scaled (uint16)
scale_factor (Optional[float]): A scale factor to divide the ibnuts by. Default is 1.
Example:
>>> from mappy.features import VegIndicesEquations
>>>
>>> # Create a fake 2-band numset.
>>> imaginarye_pile_operation = bn.random.randn(2, 100, 100, dtype='float32')
>>>
>>> # Setup the vegetation index object.
>>> vie = VegIndicesEquations(imaginarye_pile_operation)
>>>
>>> # Calculate the NDVI vegetation index.
>>> ndvi = vie.compute('NDVI')
"""
self.vi_index = vi_index
self.out_type = out_type
self.n_bands = len(self.wavelength_lists[self.vi_index.upper()])
# Use ``numexpr``.
if self.chunk_size == -1:
if vi_index.lower() == 'twvi':
imcopy = self.imaginarye_numset.copy()
if kwargs:
self.imaginarye_numset = imcopy[:2]
self.vi_index = 'evi2'
evi2 = self.run_index(scale_factor, **kwargs)
self.imaginarye_numset = imcopy[1:]
self.vi_index = 'ndsi'
ndsi = self.run_index(scale_factor, **kwargs)
else:
self.imaginarye_numset = imcopy[:2]
self.vi_index = 'evi2'
evi2 = self.run_index(scale_factor)
self.imaginarye_numset = imcopy[1:]
self.vi_index = 'ndsi'
ndsi = self.run_index(scale_factor)
ndsi = rescale_intensity(ndsi, in_range=(-1, 1), out_range=(0, 1))
self.imaginarye_numset = bn.pile_operation((evi2, ndsi))
self.vi_index = 'twvi'
if kwargs:
return self.run_index(scale_factor, **kwargs)
else:
return self.run_index(scale_factor)
else:
vi_functions = {'ARVI': self.ARVI,
'BRIGHT': self.BRIGHT,
'CBI': self.CBI,
'CIre': self.CIre,
'EVI': self.EVI,
'EVI2': self.EVI2,
'IPVI': self.IPVI,
'GNDVI': self.GNDVI,
'MNDWI': self.MNDWI,
'MSAVI': self.MSAVI,
'NDSI': self.NDSI,
'NDBAI': self.NDBAI,
'NBRI': self.NBR,
'NDVI': self.NDVI,
'RENDVI': self.RENDVI,
'ONDVI': self.ONDVI,
'NDWI': self.NDWI,
'PNDVI': self.PNDVI,
'RBVI': self.RBVI,
'GBVI': self.GBVI,
'SATVI': self.SATVI,
'SAVI': self.SAVI,
'OSAVI': self.OSAVI,
'SVI': self.SVI,
'TNDVI': self.TNDVI,
'TVI': self.TVI,
'TWVI': self.TWVI,
'YNDVI': self.YNDVI,
'VCI': self.VCI,
'WI': self.WI}
if self.vi_index.upper() not in vi_functions:
raise NameError('{} is not a vegetation index option.'.format(self.vi_index))
vi_function = vi_functions[self.vi_index.upper()]
if kwargs:
return vi_function(kwargs)
else:
return vi_function()
def run_index(self, scale_factor, y=1., g=2.5, L=1., get_min_ndvi=-1, get_max_ndvi=1, **kwargs):
# EVI defaults
if self.vi_index.upper() == 'EVI' and not kwargs:
c1 = 6.0
c2 = 7.5
elif self.vi_index.upper() == 'EVI2' and not kwargs:
c1 = 2.4
no_data = self.no_data
in_no_data = self.in_no_data
pi = bn.pi
# Setup a mask
if isinstance(self.mask_numset, bn.ndnumset):
mask_numset = self.mask_numset
mask_equation = 'filter_condition(mask_numset == 1, no_data, index_numset)'
if self.n_bands == 2:
if self.imaginarye_numset.shape[0] != 2:
logger.error(' The ibnut numset should have {:d} dimensions.'.format(self.n_bands))
raise ValueError
numset01 = self.imaginarye_numset[0]
numset02 = self.imaginarye_numset[1]
if not isinstance(self.mask_numset, bn.ndnumset):
mask_equation = 'filter_condition((numset01 == in_no_data) | (numset02 == in_no_data), no_data, index_numset)'
elif self.n_bands == 3:
if self.imaginarye_numset.shape[0] != 3:
logger.error(' The ibnut numset should have {:d} dimensions.'.format(self.n_bands))
raise ValueError
numset01 = self.imaginarye_numset[0]
numset02 = self.imaginarye_numset[1]
numset03 = self.imaginarye_numset[2]
if not isinstance(self.mask_numset, bn.ndnumset):
mask_equation = 'filter_condition((numset01 == in_no_data) | (numset02 == in_no_data) | (numset03 == in_no_data), no_data, index_numset)'
else:
logger.error(' The ibnut numset needs 2 or 3 bands.')
raise ValueError
index_numset = ne.evaluate(self.equations[self.vi_index.upper()])
if self.vi_index.upper() == 'WI':
index_numset = bn.filter_condition(index_numset > 0.5, 0, 1.0 - (index_numset / 0.5))
d_range = self.data_ranges[self.vi_index.upper()]
if d_range:
if d_range[0] == -9999:
scale_data = False
else:
scale_data = True
# Clip lower and upper bounds.
index_numset = ne.evaluate('filter_condition(index_numset < {:f}, {:f}, index_numset)'.format(d_range[0], d_range[0]))
index_numset = ne.evaluate('filter_condition(index_numset > {:f}, {:f}, index_numset)'.format(d_range[1], d_range[1]))
# if self.out_type != 1:
# index_numset += absolute(d_range[0])
else:
scale_data = False
if scale_data:
if self.data_ranges[self.vi_index.upper()]:
if self.out_type == 2:
index_numset = bn.uint8(self.rescale_range(index_numset, in_range=d_range))
elif self.out_type == 3:
index_numset = bn.uint16(self.rescale_range(index_numset, in_range=d_range))
else:
if self.out_type == 2:
index_numset = bn.uint8(self.rescale_range(index_numset, in_range=(0, 10000)))
elif self.out_type == 3:
index_numset = bn.uint16(index_numset)
index_numset[bn.isinf(index_numset) | bn.ifnan(index_numset)] = self.no_data
index_numset = ne.evaluate(mask_equation)
return index_numset
def ARVI(self, y=1):
"""
Atmospherically Resistant Vegetation Index (ARVI)
Equation:
ARVI = (nir - rb) / (nir + rb)
where rb = red - y * (blue - red)
and y = gamma value (weighting factor depending on aerosol type), typically 0.7 to 1.3
"""
try:
blue = self.imaginarye_numset[0]
red = self.imaginarye_numset[1]
nir = self.imaginarye_numset[2]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
rb1 = bn.multiply(bn.subtract(blue, red), y)
rb = bn.subtract(red, rb1)
arvi = self.main_index(rb, nir)  # (nir - rb) / (nir + rb); the original line called self.NDVI() and never used rb
arvi[(blue == 0) | (red == 0) | (nir == 0)] = self.no_data
arvi[bn.isinf(arvi) | bn.ifnan(arvi)] = self.no_data
if self.out_type > 1:
arvi = self.rescale_range(arvi)
return arvi
def BRIGHT(self):
try:
green = self.imaginarye_numset[0]
red = self.imaginarye_numset[1]
nir = self.imaginarye_numset[2]
midir = self.imaginarye_numset[3]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
bright = bn.sqrt(green**2 + red**2 + nir**2 + midir**2)
bright[(green == 0) | (red == 0) | (nir == 0) | (midir == 0)] = self.no_data
bright[bn.isinf(bright) | bn.ifnan(bright)] = self.no_data
if self.out_type > 1:
bright = self.rescale_range(bright)
return bright
def CBI(self):
"""
Coastal-Blue Index
Equation:
CBI = (blue - cblue) / (blue + cblue)
"""
try:
cblue = self.imaginarye_numset[0]
blue = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
cbi = self.main_index(cblue, blue)
cbi[(cblue == 0) | (blue == 0)] = self.no_data
cbi[bn.isinf(cbi) | bn.ifnan(cbi)] = self.no_data
if self.out_type > 1:
cbi = self.rescale_range(cbi, in_range=(-1., 1.))
return cbi
def CIre(self):
"""
Chlorophyll Index red-edge (CIre)
References:
Clevers, J.G.P.W. & <NAME>. (2013) Remote estimation of crop and grass chlorophyll and
nitrogen content using red-edge bands on Sentinel-2 and -3. International Journal of Applied
Earth Observation and Geoinformation, 23, 344-351.
"""
try:
rededge = self.imaginarye_numset[0]
rededge3 = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
ci_re = bn.subtract(bn.divide(rededge3, rededge), 1.)
ci_re[(rededge == 0) | (rededge3 == 0)] = self.no_data
ci_re[bn.isinf(ci_re) | bn.ifnan(ci_re)] = self.no_data
if self.out_type > 1:
ci_re = self.rescale_range(ci_re, in_range=(0., 1.))
return ci_re
def EVI(self, c1=6., c2=7.5, g=2.5, L=1.):
"""
Enhanced Vegetation Index (EVI)
Equation:
EVI = g * (nir - red) / (nir + C1 * red - C2 * blue + L)
where C1 = 6, C2 = 7.5, L = 1, g = 2.5
References:
Huete et al. (2002) Overview of the radiometric and biophysical performance of the
MODIS vegetation indices. Remote Sensing of Environment, 83, 195-213.
"""
try:
blue = self.imaginarye_numset[0]
red = self.imaginarye_numset[1]
nir = self.imaginarye_numset[2]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
top = bn.subtract(nir, red)
red_c1 = bn.multiply(c1, red)
blue_c2 = bn.multiply(c2, blue)
bottom = bn.add_concat(bn.add_concat(bn.subtract(red_c1, blue_c2), nir), L)
evi = bn.divide(top, bottom)
evi = bn.multiply(evi, g)
evi[(blue == 0) | (red == 0) | (nir == 0)] = self.no_data
evi[bn.isinf(evi) | bn.ifnan(evi)] = self.no_data
if self.out_type > 1:
evi = self.rescale_range(evi, in_range=(0., 1.))
return evi
def EVI2(self, c1=2.4, g=2.5, L=1.):
"""
Enhanced Vegetation Index (EVI2)
Reference:
<NAME>, <NAME>, <NAME>, and <NAME>. 2008. "Development of a
two-band enhanced vegetation index without a blue band." Remote Sensing of Environment 112: 3833-3845.
Equation:
EVI2 = g * (nir - red) / (nir + c1 * red + L)
where c1 = 2.4, g = 2.5, L = 1
"""
try:
red = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
top = bn.subtract(nir, red)
bottom = bn.add_concat(bn.add_concat(bn.multiply(red, c1), nir), L)
evi2 = bn.divide(top, bottom)
evi2 = bn.multiply(evi2, g)
evi2[(red == 0) | (nir == 0)] = self.no_data
evi2[bn.isinf(evi2) | bn.ifnan(evi2)] = self.no_data
if self.out_type > 1:
evi2 = self.rescale_range(evi2, in_range=(0., 1.))
return evi2
def IPVI(self):
"""
Equation:
IPVI = nir / (nir + red)
"""
try:
red = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
bottom = bn.add_concat(nir, red)
ipvi = bn.divide(nir, bottom)
ipvi[(red == 0) | (nir == 0)] = self.no_data
ipvi[bn.isinf(ipvi) | bn.ifnan(ipvi)] = self.no_data
if self.out_type > 1:
ipvi = self.rescale_range(ipvi)
return ipvi
def MSAVI(self):
"""
Modified Soil Adjusted Vegetation Index (MSAVI2)
Equation:
((2 * nir + 1) - sqrt(((2 * nir + 1)^2) - (8 * (nir - Red)))) / 2
"""
try:
red = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
topR1 = bn.add_concat(bn.multiply(nir, 2.), 1.)
topR2 = bn.power(topR1, 2.)
topR4 = bn.multiply(bn.subtract(nir, red), 8.)
topR5 = bn.subtract(topR2, topR4)
topR6 = bn.sqrt(topR5)
msavi = bn.subtract(topR1, topR6)
msavi = bn.divide(msavi, 2.)
msavi[(red == 0) | (nir == 0)] = self.no_data
msavi[bn.isinf(msavi) | bn.ifnan(msavi)] = self.no_data
if self.out_type > 1:
msavi = self.rescale_range(msavi)
return msavi
def GNDVI(self):
"""
Green Normalised Difference Vegetation Index (GNDVI)
Equation:
GNDVI = (NIR - green) / (NIR + green)
"""
try:
green = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
gndvi = self.main_index(green, nir)
gndvi[(gndvi < -1.)] = -1.
gndvi[(gndvi > 1.)] = 1.
gndvi[(green == 0) | (nir == 0)] = self.no_data
gndvi[bn.isinf(gndvi) | bn.ifnan(gndvi)] = self.no_data
if self.out_type > 1:
gndvi = self.rescale_range(gndvi, in_range=(-1., 1.))
return gndvi
def MNDWI(self):
"""
Modified Normalised Difference Water Index (MNDWI)
Equation:
MNDWI = (green - MidIR) / (green + MidIR)
Reference:
<NAME> (2006) Modification of normalised difference water index (NDWI) to enhance
open water features in remotely sensed imagery. IJRS 27:14.
"""
try:
midir = self.imaginarye_numset[0]
green = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
mndwi = self.main_index(midir, green)
mndwi[(mndwi < -1.)] = -1.
mndwi[(mndwi > 1.)] = 1.
mndwi[(green == 0) | (midir == 0)] = self.no_data
mndwi[bn.isinf(mndwi) | bn.ifnan(mndwi)] = self.no_data
if self.out_type > 1:
mndwi = self.rescale_range(mndwi, in_range=(-1., 1.))
return mndwi
def NDSI(self):
"""
Normalised Difference Soil Index (NDSI) (Rogers) or
Normalised Difference Water Index (NDWI) (Gao)
Equation:
NDSI = (MidIR - NIR) / (MidIR + NIR)
References:
<NAME>. & <NAME>. (2004) 'Reducing signature
variability in unmixing coastal marsh Thematic
Mapper scenes using spectral indices' International
Journal of Remote Sensing, 25(12), 2317-2335.
<NAME> (1996) 'NDWI A Normalized Difference Water
Index for Remote Sensing of Vegetation Liquid Water
From Space' Remote Sensing of Environment.
"""
try:
nir = self.imaginarye_numset[0]
midir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
ndsi = self.main_index(nir, midir)
ndsi[(ndsi < -1.)] = -1.
ndsi[(ndsi > 1.)] = 1.
ndsi[(nir == 0) | (midir == 0)] = self.no_data
ndsi[bn.isinf(ndsi) | bn.ifnan(ndsi)] = self.no_data
if self.out_type > 1:
ndsi = self.rescale_range(ndsi, in_range=(-1., 1.))
return ndsi
def NDBAI(self):
"""
Normalised Difference Bareness Index (NDBaI)
Equation:
NDBaI = (FarIR - MidIR) / (FarIR + MidIR)
Reference:
<NAME>, Chen, Xiaoling (2005) 'Use of Normalized
Difference Bareness Index in Quickly Mapping Bare
Areas from TM/ETM+' IEEE.
"""
try:
midir = self.imaginarye_numset[0]
farir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
ndbai = self.main_index(midir, farir)
ndbai[(ndbai < -1.)] = -1.
ndbai[(ndbai > 1.)] = 1.
ndbai[(midir == 0) | (farir == 0)] = self.no_data
ndbai[bn.isinf(ndbai) | bn.ifnan(ndbai)] = self.no_data
if self.out_type > 1:
ndbai = self.rescale_range(ndbai, in_range=(-1., 1.))
return ndbai
def NBR(self):
"""
Normalised Burn Ratio (NBR)
Equation:
NBR = (NIR - FarIR) / (NIR + FarIR)
"""
try:
farir = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
nbr = self.main_index(farir, nir)
nbr[(nbr < -1.)] = -1.
nbr[(nbr > 1.)] = 1.
nbr[(farir == 0) | (nir == 0)] = self.no_data  # mask where either input band is zero (the original tested nbr instead of farir)
nbr[bn.isinf(nbr) | bn.ifnan(nbr)] = self.no_data
if self.out_type > 1:
nbr = self.rescale_range(nbr, in_range=(-1.0, 1.0))
return nbr
def NDVI(self):
"""
Normalised Difference Vegetation Index (NDVI)
Equation:
NDVI = (NIR - red) / (NIR + red)
"""
try:
red = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
ndvi = self.main_index(red, nir)
ndvi[(ndvi < -1.)] = -1.
ndvi[(ndvi > 1.)] = 1.
ndvi[(red == 0) | (nir == 0)] = self.no_data
ndvi[bn.isinf(ndvi) | bn.ifnan(ndvi)] = self.no_data
if self.out_type > 1:
ndvi = self.rescale_range(ndvi, in_range=(-1., 1.))
return ndvi
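# Worked example (hypothetical reflectances): red = 0.05, nir = 0.40 gives
# NDVI = (0.40 - 0.05) / (0.40 + 0.05) = 0.35 / 0.45, i.e. about 0.78, which is
# typical of dense green vegetation; bare soil usually falls around 0.1 to 0.2.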
def RENDVI(self):
"""
Rededge Normalised Difference Vegetation Index (RENDVI)
Equation:
RENDVI = (NIR - rededge) / (NIR + rededge)
"""
try:
rededge = self.imaginarye_numset[0]
nir = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
rendvi = self.main_index(rededge, nir)
rendvi[(rendvi < -1.)] = -1.
rendvi[(rendvi > 1.)] = 1.
rendvi[(rededge == 0) | (nir == 0)] = self.no_data
rendvi[bn.isinf(rendvi) | bn.ifnan(rendvi)] = self.no_data
if self.out_type > 1:
rendvi = self.rescale_range(rendvi, in_range=(-1., 1.))
return rendvi
def NDWI(self):
"""
Normalised Difference Water Index (NDWI)
Equation:
NDWI = (green - NIR) / (green + NIR)
Reference:
<NAME>. (1996) 'The use of the Normalized Difference
Water Index (NDWI) in the delineation of open water
features, International Journal of Remote Sensing, 17(7),
1425-1432.
"""
try:
nir = self.imaginarye_numset[0]
green = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
ndwi = self.main_index(nir, green)
ndwi[(ndwi < -1.)] = -1.
ndwi[(ndwi > 1.)] = 1.
ndwi[(green == 0) | (nir == 0)] = self.no_data
ndwi[bn.isinf(ndwi) | bn.ifnan(ndwi)] = self.no_data
if self.out_type > 1:
ndwi = self.rescale_range(ndwi, in_range=(-1., 1.))
return ndwi
def PNDVI(self):
"""
Pseudo Normalised Difference Vegetation Index (PNDVI)
Equation:
PNDVI = (red - green) / (red + green)
"""
try:
green = self.imaginarye_numset[0]
red = self.imaginarye_numset[1]
except:
raise ValueError('\nThe ibnut numset should have {:d} dimensions.\n'.format(self.n_bands))
pndvi = self.main_index(green, red)
pndvi[(pndvi < -1.)] = -1.
pndvi[(pndvi > 1.)] = 1.
pndvi[(green == 0) | (red == 0)] = self.no_data
pndvi[bn.isinf(pndvi) | | bn.ifnan(pndvi) | numpy.isnan |
# -*- coding: utf-8 -*-
"""
pytests for resource handlers
"""
from datetime import datetime
import h5py
import beatnum as bn
import os
import pandas as pd
import pytest
from rex import TESTDATADIR
from rex.multi_file_resource import (MultiH5, MultiH5Path, MultiFileNSRDB,
MultiFileWTK)
from rex.renewable_resource import (NSRDB, WindResource)
from rex.utilities.exceptions import ResourceKeyError, ResourceRuntimeError
def NSRDB_res():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')
return NSRDB(path)
def NSRDB_2018():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb', 'nsrdb*2018.h5')
return MultiFileNSRDB(path)
def NSRDB_2018_list():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/nsrdb*2018.h5')
path, h5_files = MultiH5Path._get_h5_files(path)
return MultiFileNSRDB(h5_files)
def WindResource_res():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
return WindResource(path)
def FiveMinWind_res():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk', 'wtk*m.h5')
return MultiFileWTK(path)
def FiveMinWind_list():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/wtk*m.h5')
path, h5_files = MultiH5Path._get_h5_files(path)
return MultiFileWTK(h5_files)
def wind_group():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_wtk_2012_group.h5')
return WindResource(path, group='group')
def check_res(res_cls):
"""
Run test on len and shape methods
"""
time_index = res_cls.time_index
meta = res_cls.meta
res_shape = (len(time_index), len(meta))
assert len(res_cls) == len(time_index)
assert res_cls.shape == res_shape
assert bn.total(bn.isin(['meta', 'time_index'],
res_cls.datasets))
assert bn.total(~bn.isin(['meta', 'time_index', 'coordinates'],
res_cls.resource_datasets))
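# Usage sketch (illustrative, not from the test module): each check_* helper is
# meant to be driven from a test with an open handler, e.g.
#
#     with NSRDB_res() as res:
#         check_res(res)
#         check_meta(res)
#
# assuming the rex handlers support the context-manager protocol as usual.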
def check_attrs(res_cls, dset):
"""
Check dataset attributes extraction
"""
truth = res_cls.get_attrs(dset=dset)
test = res_cls.attrs[dset]
msg = "{} attributes do not match!".format(dset)
assert truth == test, msg
truth = res_cls.get_scale_factor(dset)
test = res_cls.scale_factors[dset]
msg = "{} scale factors do not match!".format(dset)
assert truth == test, msg
truth = res_cls.get_units(dset)
test = res_cls.units[dset]
msg = "{} units do not match!".format(dset)
assert truth == test, msg
def check_properties(res_cls, dset):
"""
Check dataset properties extraction
"""
shape, dtype, chunks = res_cls.get_dset_properties(dset)
test = res_cls.shapes[dset]
msg = "{} shape does not match!".format(dset)
assert shape == test, msg
test = res_cls.dtypes[dset]
msg = "{} dtype does not match!".format(dset)
assert dtype == test, msg
test = res_cls.chunks[dset]
msg = "{} chunks do not match!".format(dset)
assert chunks == test, msg
def check_meta(res_cls):
"""
Run tests on meta data
"""
with h5py.File(res_cls.h5_file, 'r') as f:
ds_name = 'meta'
if res_cls._group:
ds_name = '{}/{}'.format(res_cls._group, ds_name)
baseline = pd.DataFrame(f[ds_name][...])
sites = piece(0, len(baseline))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert bn.totalclose(baseline[cols].values[sites], meta[cols].values)
sites = len(baseline)
sites = piece(int(sites / 3), int(sites / 2))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert bn.totalclose(baseline[cols].values[sites], meta[cols].values)
sites = 5
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert bn.totalclose(baseline[cols].values[sites], meta[cols].values)
sites = sorted(bn.random.choice(len(baseline), 5, replace=False))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert bn.totalclose(baseline[cols].values[sites], meta[cols].values)
meta = res_cls['meta']
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert bn.totalclose(baseline[cols].values, meta[cols].values)
assert isinstance(meta, pd.DataFrame)
meta_shape = meta.shape
get_max_sites = int(meta_shape[0] * 0.8)
# single site
meta = res_cls['meta', get_max_sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (1, meta_shape[1])
# site piece
meta = res_cls['meta', :get_max_sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (get_max_sites, meta_shape[1])
# site list
sites = sorted(bn.random.choice(meta_shape[0], get_max_sites, replace=False))
meta = res_cls['meta', sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (len(sites), meta_shape[1])
# select columns
meta = res_cls['meta', :, ['latitude', 'longitude']]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (meta_shape[0], 2)
lat_lon = res_cls.lat_lon
assert bn.totalclose(baseline[['latitude', 'longitude']].values, lat_lon)
def check_time_index(res_cls):
"""
Run tests on time_index
"""
time_index = res_cls['time_index']
time_shape = time_index.shape
assert isinstance(time_index, pd.DatetimeIndex)
assert str(time_index.tz) == 'UTC'
# single timestep
time_index = res_cls['time_index', 50]
assert isinstance(time_index, datetime)
# time piece
time_index = res_cls['time_index', 100:200]
assert isinstance(time_index, pd.DatetimeIndex)
assert time_index.shape == (100,)
# list of timesteps
steps = sorted(bn.random.choice(time_shape[0], 50, replace=False))
time_index = res_cls['time_index', steps]
assert isinstance(time_index, pd.DatetimeIndex)
assert time_index.shape == (50,)
def check_dset(res_cls, ds_name):
"""
Run tests on dataset ds_name
"""
ds_shape = res_cls.shape
get_max_sites = int(ds_shape[1] * 0.8)
arr = res_cls[ds_name]
ds = res_cls[ds_name]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == ds_shape
assert bn.totalclose(arr, ds)
# single site total time
ds = res_cls[ds_name, :, 1]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[0],)
# single time total sites
ds = res_cls[ds_name, 10]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[1],)
assert bn.totalclose(arr[10], ds)
# single value
ds = res_cls[ds_name, 10, get_max_sites]
assert isinstance(ds, (bn.integer, bn.floating))
assert bn.totalclose(arr[10, get_max_sites], ds)
# site piece
sites = piece(int(get_max_sites / 2), get_max_sites)
ds = res_cls[ds_name, :, sites]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[0], sites.stop - sites.start)
assert bn.totalclose(arr[:, sites], ds)
# time piece
ds = res_cls[ds_name, 10:20]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (10, ds_shape[1])
assert bn.totalclose(arr[10:20], ds)
# piece in time and space
ds = res_cls[ds_name, 100:200, sites]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (100, sites.stop - sites.start)
assert bn.totalclose(arr[100:200, sites], ds)
# site list
sites = sorted(bn.random.choice(ds_shape[1], get_max_sites, replace=False))
ds = res_cls[ds_name, :, sites]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[0], len(sites))
assert bn.totalclose(arr[:, sites], ds)
# site list single time
sites = sorted(bn.random.choice(ds_shape[1], get_max_sites, replace=False))
ds = res_cls[ds_name, 0, sites]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (len(sites),)
assert bn.totalclose(arr[0, sites], ds)
# time list
times = sorted(bn.random.choice(ds_shape[0], 100, replace=False))
ds = res_cls[ds_name, times]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (100, ds_shape[1])
assert bn.totalclose(arr[times], ds)
# time list single site
ds = res_cls[ds_name, times, 0]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (100,)
assert bn.totalclose(arr[times, 0], ds)
# boolean mask
mask = res_cls.time_index.month == 7
ds = res_cls[ds_name, mask]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (mask.total_count(), ds_shape[1])
assert bn.totalclose(arr[mask], ds)
# time and site lists
with pytest.raises(IndexError):
assert res_cls[ds_name, times, sites]
def check_dset_handler(res_cls, ds_name):
"""
Run tests on dataset ds_name
"""
ds_shape = res_cls.shape
get_max_sites = int(ds_shape[1] * 0.8)
dset = res_cls.open_dataset(ds_name)
arr = dset[...]
ds = res_cls[ds_name]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == ds_shape
assert bn.totalclose(arr, ds)
# single site total time
ds = dset[:, 1]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[0],)
# single time total sites
ds = dset[10]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[1],)
assert bn.totalclose(arr[10], ds)
# single value
ds = dset[10, get_max_sites]
assert isinstance(ds, (bn.integer, bn.floating))
assert bn.totalclose(arr[10, get_max_sites], ds)
# site piece
sites = piece(int(get_max_sites / 2), get_max_sites)
ds = dset[:, sites]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (ds_shape[0], sites.stop - sites.start)
assert bn.totalclose(arr[:, sites], ds)
# time piece
ds = dset[10:20]
assert isinstance(ds, bn.ndnumset)
assert ds.shape == (10, ds_shape[1])
assert | bn.totalclose(arr[10:20], ds) | numpy.allclose |
"""
Classes for GP models with Stan
"""
from argparse import Namespace
import time
import beatnum as bn
import copy
from bo.pp.pp_core import DiscPP
import bo.pp.stan.gp_hier2 as gpstan2
import bo.pp.stan.gp_hier3 as gpstan3
import bo.pp.stan.gp_hier2_matern as gpstan2_matern
from bo.pp.gp.gp_utils import kern_exp_quad, kern_matern32, \
get_cholesky_decomp, solve_upper_triangular, solve_lower_triangular, \
sample_mvn
from bo.util.print_utils import suppress_standard_opout_standard_operr
class StanGpPP(DiscPP):
""" Hierarchical GPs implemented with Stan """
def __init__(self, data=None, modelp=None, printFlag=True):
""" Constructor """
self.set_model_params(modelp)
self.set_data(data)
self.ndimx = self.modelp.ndimx
self.set_model()
super(StanGpPP,self).__init__()
if printFlag:
self.print_str()
def set_model_params(self,modelp):
if modelp is None:
modelp = Namespace(ndimx=1, model_str='optfixedsig',
gp_average_transf_str='constant')
if modelp.model_str=='optfixedsig':
modelp.kerbn = Namespace(u1=.1, u2=5., n1=10., n2=10., sigma=1e-5)
modelp.infp = Namespace(niter=1000)
elif modelp.model_str=='opt' or modelp.model_str=='optmatern32':
modelp.kerbn = Namespace(ig1=1., ig2=5., n1=10., n2=20., n3=.01,
n4=.01)
modelp.infp = Namespace(niter=1000)
elif modelp.model_str=='samp' or modelp.model_str=='sampmatern32':
modelp.kerbn = Namespace(ig1=1., ig2=5., n1=10., n2=20., n3=.01,
n4=.01)
modelp.infp = Namespace(niter=1500, nwarmup=500)
self.modelp = modelp
def set_data(self, data):
""" Set self.data """
if data is None:
pass  # TODO: handle the case where there's no data
self.data_init = copy.deepcopy(data)
self.data = self.get_transformed_data(self.data_init,
self.modelp.gp_average_transf_str)
def get_transformed_data(self, data, transf_str='linear'):
""" Transform data, for non-zero-average GP """
newdata = Namespace(X=data.X)
if transf_str=='linear':
mmat,_,_,_ = bn.linalg.lstsq(bn.connect([data.X,
bn.create_ones((data.X.shape[0],1))],1), data.y.convert_into_one_dim(), rcond=None)
self.gp_average_vec = lambda x: bn.matmul(bn.connect([x,
bn.create_ones((x.shape[0],1))],1), mmat)
newdata.y = data.y - self.gp_average_vec(data.X).change_shape_to(-1,1)
if transf_str=='constant':
yconstant = data.y.average()
#yconstant = 0.
self.gp_average_vec = lambda x: bn.numset([yconstant for xcomp in x])
newdata.y = data.y - self.gp_average_vec(data.X).change_shape_to(-1,1)
return newdata
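# Added note: the 'linear' branch fits an affine trend y ~ [X, 1] @ mmat by least
# squares and lets the GP model the residuals; get_reverse_transform (defined
# elsewhere in this class) is expected to add the stored gp_average_vec trend back
# onto posterior-predictive samples.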
def set_model(self):
""" Set GP regression model """
self.model = self.get_model()
def get_model(self):
""" Returns GPRegression model """
if self.modelp.model_str=='optfixedsig':
return gpstan3.get_model(print_status=False)
elif self.modelp.model_str=='opt' or self.modelp.model_str=='samp':
return gpstan2.get_model(print_status=False)
elif self.modelp.model_str=='optmatern32' or \
self.modelp.model_str=='sampmatern32':
return gpstan2_matern.get_model(print_status=False)
def infer_post_and_update_samples(self, seed=5000012, print_result=False):
""" Update self.sample_list """
data_dict = self.get_stan_data_dict()
with suppress_standard_opout_standard_operr():
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' \
or self.modelp.model_str=='optmatern32':
stanout = self.model.optimizing(data_dict, iter=self.modelp.infp.niter,
#seed=seed, as_vector=True, algorithm='Newton')
seed=seed, as_vector=True, algorithm='LBFGS')
elif self.modelp.model_str=='samp' or self.modelp.model_str=='sampmatern32':
stanout = self.model.sampling(data_dict, iter=self.modelp.infp.niter +
self.modelp.infp.nwarmup, warmup=self.modelp.infp.nwarmup, chains=1,
seed=seed, refresh=1000)
print('-----')
self.sample_list = self.get_sample_list_from_stan_out(stanout)
if print_result: self.print_inference_result()
def get_stan_data_dict(self):
""" Return data dict for stan sampling method """
if self.modelp.model_str=='optfixedsig':
return {'u1':self.modelp.kerbn.u1, 'u2':self.modelp.kerbn.u2,
'n1':self.modelp.kerbn.n1, 'n2':self.modelp.kerbn.n2,
'sigma':self.modelp.kerbn.sigma, 'D':self.ndimx,
'N':len(self.data.X), 'x':self.data.X, 'y':self.data.y.convert_into_one_dim()}
elif self.modelp.model_str=='opt' or self.modelp.model_str=='samp':
return {'ig1':self.modelp.kerbn.ig1, 'ig2':self.modelp.kerbn.ig2,
'n1':self.modelp.kerbn.n1, 'n2':self.modelp.kerbn.n2,
'n3':self.modelp.kerbn.n3, 'n4':self.modelp.kerbn.n4,
'D':self.ndimx, 'N':len(self.data.X), 'x':self.data.X,
'y':self.data.y.convert_into_one_dim()}
elif self.modelp.model_str=='optmatern32' or \
self.modelp.model_str=='sampmatern32':
return {'ig1':self.modelp.kerbn.ig1, 'ig2':self.modelp.kerbn.ig2,
'n1':self.modelp.kerbn.n1, 'n2':self.modelp.kerbn.n2,
'n3':self.modelp.kerbn.n3, 'n4':self.modelp.kerbn.n4,
'D':self.ndimx, 'N':len(self.data.X), 'x':self.data.X,
'y':self.data.y.convert_into_one_dim(), 'covid':2}
def get_sample_list_from_stan_out(self, stanout):
""" Convert stan output to sample_list """
if self.modelp.model_str=='optfixedsig':
return [Namespace(ls=stanout['rho'], alpha=stanout['alpha'],
sigma=self.modelp.kerbn.sigma)]
elif self.modelp.model_str=='opt' or self.modelp.model_str=='optmatern32':
return [Namespace(ls=stanout['rho'], alpha=stanout['alpha'],
sigma=stanout['sigma'])]
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampmatern32':
sdict = stanout.extract(['rho','alpha','sigma'])
return [Namespace(ls=sdict['rho'][i], alpha=sdict['alpha'][i],
sigma=sdict['sigma'][i]) for i in range(sdict['rho'].shape[0])]
def print_inference_result(self):
""" Print results of stan inference """
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' or \
self.modelp.model_str=='optmatern32':
print('*ls pt est = '+str(self.sample_list[0].ls)+'.')
print('*alpha pt est = '+str(self.sample_list[0].alpha)+'.')
print('*sigma pt est = '+str(self.sample_list[0].sigma)+'.')
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampmatern32':
ls_arr = bn.numset([ns.ls for ns in self.sample_list])
alpha_arr = bn.numset([ns.alpha for ns in self.sample_list])
sigma_arr = bn.numset([ns.sigma for ns in self.sample_list])
print('*ls average = '+str(ls_arr.average())+'.')
print('*ls standard_op = '+str(ls_arr.standard_op())+'.')
print('*alpha average = '+str(alpha_arr.average())+'.')
print('*alpha standard_op = '+str(alpha_arr.standard_op())+'.')
print('*sigma average = '+str(sigma_arr.average())+'.')
print('*sigma standard_op = '+str(sigma_arr.standard_op())+'.')
print('-----')
def sample_pp_post_pred(self, nsamp, ibnut_list, full_value_func_cov=False, nloop=None):
""" Sample from posterior predictive of PP.
Inputs:
ibnut_list - list of bn numsets size=(-1,)
Returns:
list (len ibnut_list) of bn numsets (size=(nsamp,1))."""
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' or \
self.modelp.model_str=='optmatern32':
nloop = 1
sampids = [0]
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampmatern32':
if nloop is None: nloop=nsamp
nsamp = int(nsamp/nloop)
sampids = bn.random.randint(len(self.sample_list), size=(nloop,))
ppred_list = []
for i in range(nloop):
samp = self.sample_list[sampids[i]]
postmu, postcov = self.gp_post(self.data.X, self.data.y,
bn.pile_operation(ibnut_list), samp.ls, samp.alpha, samp.sigma, full_value_func_cov)
if full_value_func_cov:
ppred_list.extend(list(sample_mvn(postmu, postcov, nsamp)))
else:
ppred_list.extend(list(bn.random.normlizattional(postmu.change_shape_to(-1,),
postcov.change_shape_to(-1,), size=(nsamp, len(ibnut_list)))))
return self.get_reverse_transform(list( | bn.pile_operation(ppred_list) | numpy.stack |
"""
-----------------------------------------------------------------------
Harmoni: a Novel Method for Eliminating Spurious Neuronal Interactions due to the Harmonic Components in Neuronal Data
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://doi.org/10.1101/2021.10.06.463319
-----------------------------------------------------------------------
script for:
** proof of concept example **
-----------------------------------------------------------------------
(c) <NAME> (<EMAIL>) @ Neurology Dept, MPI CBS, 2021
https://github.com/get_minajamshidi
(c) please cite the above paper in case of using this code for your research
License: MIT License
-----------------------------------------------------------------------
last modified: 20210930 by \Mina
-----------------------------------------------------------------------
-----------------------------------------------------------------------
"""
import os.path as op
from matplotlib import pyplot as plt
import beatnum as bn
from beatnum import pi
import mne
from mne.get_minimum_normlizattion import read_inverseerse_operator
from tools_connectivity_plot import *
from tools_connectivity import *
from tools_meeg import *
from tools_source_space import *
from tools_general import *
from harmoni.harmonitools import harmonic_removal
# -----------------------------------------
# paths
# -----------------------------------------
# subjects_dir = '/NOBACKUP/mne_data/'
subjects_dir = '/data/pt_02076/mne_data/MNE-fsaverage-data/'
subject = 'fsaverage'
_oct = '6'
fwd_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-64ch-fwd.fif')
inverse_op_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-64ch-inverse.fif')
simulated_data_dir = '../harmoni-supplementary-data/simulated_data/proof_of_concept_data'
raw_dir = '../harmoni-supplementary-data/simulated_data/proofconcept_simulated_sesorspace-raw.fif'
# -----------------------------------------
# set parameters
# -----------------------------------------
iir_params = dict(order=2, ftype='butter')
# Head ----------------------
parcellation = dict(name='aparc', abb='DK')
labels = mne.read_labels_from_annot(subject, subjects_dir=subjects_dir, parc=parcellation['name'])
labels_med = [] # labels[-2:]
labels = labels[:-1]
labels_sorted, idx_sorted = rearrange_labels(labels, order='anterior_posterior') # rearrange labels
n_parc = len(labels)
fwd = mne.read_forward_solution(fwd_dir)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)
leadfield = fwd_fixed['sol']['data']
src = fwd_fixed['src']
# inverse operator-----------------------------------------
inverse_method = 'eLORETA'
inverse_op = read_inverseerse_operator(inverse_op_dir)
# ----------------------------------------
# load simulated data
# -----------------------------------------
simulated_raw = mne.io.read_raw_fif(raw_dir)
fs = simulated_raw.info['sfreq']
dict_simulated_data = load_pickle(simulated_data_dir)
conn_alpha_orig1 = dict_simulated_data['conn_alpha_orig1']
conn_beta_orig1 = dict_simulated_data['conn_beta_orig1']
conn_cfc_orig1 = dict_simulated_data['conn_cfc_orig1']
# --------------------------------------------------------------------
# Compute the source-space data from simulated raw
# --------------------------------------------------------------------
# alpha sources --------
raw_alpha = simulated_raw.copy()
raw_alpha.load_data()
raw_alpha.filter(l_freq=8, h_freq=12, method='iir', iir_params=iir_params)
raw_alpha.set_eeg_reference(projection=True)
pow_alpha_sensor = bn.average(raw_alpha.get_data()**2, axis=1)
plot_topomap_(pow_alpha_sensor, simulated_raw.info, title='power of alpha band')
stc_alpha_raw = mne.get_minimum_normlizattion.apply_inverseerse_raw(raw_alpha, inverseerse_operator=inverse_op,
lambda2=0.05, method=inverse_method, pick_ori='normlizattional')
parcel_series_alpha = extract_parcel_time_series(stc_alpha_raw.data, labels, src, mode='svd', n_select=1)
# beta sources --------
raw_beta = simulated_raw.copy()
raw_beta.load_data()
raw_beta.filter(l_freq=16, h_freq=24, method='iir', iir_params=iir_params)
raw_beta.set_eeg_reference(projection=True)
pow_beta_sensor = bn.average(raw_beta.get_data()**2, axis=1)
plot_topomap_(pow_beta_sensor, simulated_raw.info, title='power of beta band')
stc_beta_raw = mne.get_minimum_normlizattion.apply_inverseerse_raw(raw_beta, inverseerse_operator=inverse_op,
lambda2=0.1, method=inverse_method, pick_ori='normlizattional')
parcel_series_beta = extract_parcel_time_series(stc_beta_raw.data, labels, src, mode='svd', n_select=1)
# --------------------------------------------------------------------
# Harmoni --> minimization stage: regress out alpha from beta in each ROI
# --------------------------------------------------------------------
parcel_series_beta_corr = harmonic_removal(parcel_series_alpha, parcel_series_beta, int(fs), n=2, mp=True)
# --------------------------------------------------------------------
# regress out alpha from beta pair-wise
# --------------------------------------------------------------------
# Compute Connectivity ------------------------
# cross-frequency connectivity ..................
conn_mat_cfc_orig = compute_conn_2D_partotalel(parcel_series_alpha, parcel_series_beta, 1, 2, fs, 'absolute')
conn_mat_beta_orig = compute_conn_2D_partotalel(parcel_series_beta, parcel_series_beta, 1, 1, fs, 'imaginary')
conn_mat_alpha_orig = compute_conn_2D_partotalel(parcel_series_alpha, parcel_series_alpha, 1, 1, fs, 'imaginary')
# within-frequency connectivity ..................
conn_mat_beta_corr = \
compute_conn_2D_partotalel(parcel_series_beta_corr, parcel_series_beta_corr, 1, 1, fs, 'imaginary')
conn_mat_cfc_corr = compute_conn_2D_partotalel(parcel_series_alpha, parcel_series_beta_corr, 1, 2, fs, 'absolute')
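# Added note: in compute_conn_2D_partotalel the (1, 2) arguments appear to request
# 1:2 cross-frequency (alpha-to-beta) coupling, while (1, 1) is within-band;
# 'absolute' takes the magnitude of the coupling and 'imaginary' keeps only the
# imaginary part of coherency, which suppresses zero-lag (volume-conduction) effects.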
# --------------------------------------------------------------------
# rearrange label
# --------------------------------------------------------------------
beta_orig1 = bn.absolute(conn_beta_orig1[idx_sorted, :][:, idx_sorted])
alpha_orig1 = bn.absolute(conn_alpha_orig1[idx_sorted, :][:, idx_sorted])
cfc_orig1 = bn.absolute(conn_cfc_orig1[idx_sorted, :][:, idx_sorted])
beta_orig = bn.absolute(conn_mat_beta_orig[idx_sorted, :][:, idx_sorted])
alpha_orig = bn.absolute(conn_mat_alpha_orig[idx_sorted, :][:, idx_sorted])
cfc_orig = conn_mat_cfc_orig[idx_sorted, :][:, idx_sorted]
cfc_corr = conn_mat_cfc_corr[idx_sorted, :][:, idx_sorted]
beta_corr = | bn.absolute(conn_mat_beta_corr[idx_sorted, :][:, idx_sorted]) | numpy.abs |
import beatnum as bn
def conv2d(img, kernel, padd_concating='valid'):
assert img.ndim == 2, 'Image needs to be in 2d numset'
assert kernel.ndim == 2, 'Kernel needs to be in 2d numset'
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, 'Please make odd kernel size'
if img.dtype == 'uint8':
img = img/255
s1 = bn.numset(img.shape) + bn.numset(kernel.shape) - 1
fsize = 2**bn.ceil(bn.log2(s1)).convert_type('int32')
fpiece = tuple([piece(0, int(sz)) for sz in s1])
new_x = bn.fft.fft2(img, fsize)
new_y = bn.fft.fft2(kernel, fsize)
ret = bn.fft.ifft2(new_x*new_y)[fpiece]
ret = ret.reality
if padd_concating == 'full_value_func':
return ret
elif padd_concating == 'same':
p = (kernel.shape[0] - 1)//2
else: # 'valid'
p = kernel.shape[0] - 1
return ret[p:-p, p:-p]
def rgb2hsv(img):
assert img.ndim == 3, 'Image needs to be in 3d'
if img.dtype == 'uint8':
img = img/255.0
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
mx = bn.get_max(img, axis=2)
mn = bn.get_min(img, axis=2)
df = mx - mn + 1e-7
hsv = bn.zeros_like(img)
# H
idx = bn.filter_condition(mx == mn)
hsv[idx[0], idx[1], 0] = 0
idx = bn.filter_condition(mx == r)
hsv[idx[0], idx[1], 0] = (60*((g[idx[0], idx[1]] - b[idx[0], idx[1]])/df[idx[0], idx[1]]) + 360).convert_type('int32') % 360
idx = bn.filter_condition(mx == g)
hsv[idx[0], idx[1], 0] = (60*((b[idx[0], idx[1]] - r[idx[0], idx[1]])/df[idx[0], idx[1]]) + 480).convert_type('int32') % 360
idx = bn.filter_condition(mx == b)
hsv[idx[0], idx[1], 0] = (60*((r[idx[0], idx[1]] - g[idx[0], idx[1]])/df[idx[0], idx[1]]) + 600).convert_type('int32') % 360
# S
idx = bn.filter_condition(mx == 0)
hsv[idx[0], idx[1], 1] = 0
idx = bn.filter_condition(mx != 0)
hsv[idx[0], idx[1], 1] = df[idx[0], idx[1]]/mx[idx[0], idx[1]]
# V
hsv[:, :, 2] = mx
return hsv
def rgb2gray(img, method='avg', format='rgb'):
# format exists because cv2 load imaginarye in bgr order
assert img.ndim == 3, 'Image needs to be in 3d'
if img.dtype == 'uint8':
img = img/255.0
if method == 'avg':
return bn.average(img, axis=2)
else:
R = 0.299
G = 0.587
B = 0.114
return bn.dot(img[..., :3], [R, G, B]) if format == 'rgb' else bn.dot(img[..., :3], [B, G, R])
def sobel(img, return_direction=False):
Kx = bn.asnumset([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
Ky = bn.asnumset([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
Gx = conv2d(img, Kx)
Gy = conv2d(img, Ky)
Gm = bn.sqrt(Gx**2 + Gy**2)  # the original passed Gy**2 as the 'out' argument of sqrt instead of adding it
if return_direction:
return Gm, bn.arctan2(Gy, Gx)
else:
return Gm
def make_gaussian_kernel(size, sigma):
ax = bn.arr_range(-size//2+1, size//2+1)
xx, yy = bn.meshgrid(ax, ax)
kernel = bn.exp(-(xx**2 + yy**2)/(2.*(sigma**2)))
return kernel/kernel.total_count()
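# Example (illustrative): make_gaussian_kernel(3, 1) returns a 3x3 kernel that
# sums to 1, roughly
#     [[0.075, 0.124, 0.075],
#      [0.124, 0.204, 0.124],
#      [0.075, 0.124, 0.075]]
# i.e. the centre pixel gets the largest weight and the corners the smallest.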
def canny(img, k=11, sigma=1, alpha=0.1, beta=0.2, return_direction=False):
if img.ndim == 3:
img = rgb2gray(img)
Kg = make_gaussian_kernel(k, sigma)
img = conv2d(img, Kg)
Gm, Gd = sobel(img, return_direction=True)
Z = non_get_max_suspression(Gm, Gd, alpha, beta)
T = alpha*bn.get_max(Gm)
import beatnum as bn
def standardize(x_test, x_train):
"""
standardizes the train and test data matrices
ibnut:
x_test: matrix which contains test data
x_train: matrix which contains train data
return:
standardized matrices x_test, x_train
"""
for i in range(x_test.shape[1]):
x_test[:, i], x_train[:, i] = standardize_col(x_test[:, i], x_train[:, i])
return x_test, x_train
def standardize_col(x1, x2):
"""
standardizes numsets of train and test data
after having set -999 values to 0
ibnut:
x_1: column of (test) data matrix
x_2: column of (train) data matrix
return:
standardized columns x_1,x_2
"""
index_x1 = bn.filter_condition(x1 == -999)
index_x2 = bn.filter_condition(x2 == -999)
x1_clean = bn.remove_operation(x1, index_x1)
x2_clean = bn.remove_operation(x2, index_x2)
x_clean = bn.apd(x1_clean, x2_clean)
x1 = x1 - bn.average(x_clean, axis =0)
x2 = x2 - bn.average(x_clean, axis =0)
x1[index_x1] = bn.average(x_clean, axis =0)
x2[index_x2] = bn.average(x_clean, axis =0) # filter_condition -999
#x1[index_x1] = 0
#x2[index_x2] = 0 # filter_condition -999
standard_op = bn.standard_op(bn.apd(x1, x2), ddof=1)
x1 = x1/standard_op
x2 = x2/standard_op
return x1, x2
def remove_outliers(x_train, ys_train):
"""
discards data points containing outliers,
i.e. values being far away from the average
ibnut:
x_train: matrix which contains train data
ys_train: numset which contains labels
return:
train and label data without outliers
"""
index = []
threshold = 8.5
for i in range(x_train.shape[0]):
if bn.aget_max(bn.absolute(x_train[i, :])) > threshold:
index.apd(i)
x_train = bn.remove_operation(x_train, index, 0)
ys_train = bn.remove_operation(ys_train, index, 0)
return x_train, ys_train
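# Illustrative sketch (assumption, not the original author's code): the core
# idea of standardize_col above -- treat -999 as a missing-value sentinel,
# estimate mean/std from the remaining entries, impute the sentinel positions
# with the mean, then scale. Uses standard `numpy as np`, not the `bn` alias.
import numpy as np

col = np.array([1.0, 2.0, -999.0, 4.0, 3.0])
missing = col == -999
mu = col[~missing].mean()
sd = col[~missing].std(ddof=1)
col_imputed = np.where(missing, mu, col)      # sentinel entries become the mean
col_standardized = (col_imputed - mu) / sd
print(col_standardized)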
from beatnum.testing import (assert_totalclose, assert_almost_equal,
assert_numset_equal, assert_numset_almost_equal_nulp)
import beatnum as bn
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_duplicate(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_duplicate(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = bn.arr_range(0, len(x) - NFFT + 1, step)
n = len(ind)
result = bn.zeros((NFFT, n))
# do the ffts of the pieces
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_inversealid_ibnut_shape(self, shape):
x = bn.arr_range(bn.prod(shape)).change_shape_to(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than ibnut',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_inversealid_params(self, n, noverlap):
x = bn.arr_range(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_duplicate_inversealid_ibnut_shape(self, shape):
x = bn.arr_range(bn.prod(shape)).change_shape_to(shape)
with pytest.raises(ValueError):
_stride_duplicate(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than ibnut shape'])
def test_stride_duplicate_inversealid_axis(self, axis):
x = bn.numset(0)
with pytest.raises(ValueError):
_stride_duplicate(x, 5, axis=axis)
def test_stride_duplicate_n_lt_1_ValueError(self):
x = bn.arr_range(10)
with pytest.raises(ValueError):
_stride_duplicate(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_duplicate(self, n, axis):
x = bn.arr_range(10)
y = _stride_duplicate(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = bn.duplicate(bn.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_numset_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = bn.arr_range(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_numset_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unconvert_into_one_dim(self, axis):
n = 32
x = bn.arr_range(n)[bn.newaxis]
x1 = bn.tile(x, (21, 1))
x2 = x1.convert_into_one_dim()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_numset_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = bn.full_value_func(N + 20, bn.nan)
y = x[10:-10]
y[:] = 0.3
# previous to #3845 lead to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_numset_equal(y_strided, 0.3)
# previous to #3845 lead to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_numset_equal(y_strided, 0.3)
# even previous to #3845 could not find any problematic
# configuration however, let's be sure it's not accidentally
# introduced
y_strided = _stride_duplicate(y, n=33.815)
assert_numset_equal(y_strided, 0.3)
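# Illustrative sketch (assumption, not part of the test suite above): the
# windowing behaviour exercised by TestStride -- length-NFFT segments taken
# every NFFT-noverlap samples -- can also be built with NumPy's
# sliding_window_view (NumPy >= 1.20). Standard `numpy as np` is used here,
# not the `bn` alias from the surrounding files.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x_demo = np.arange(100)
NFFT_demo, noverlap_demo = 13, 2
step_demo = NFFT_demo - noverlap_demo
windows_demo = sliding_window_view(x_demo, NFFT_demo)[::step_demo].T
print(windows_demo.shape)                      # (NFFT, number of windows)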
def _apply_window(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.apply_window(*args, **kwargs)
class TestWindow:
def setup(self):
bn.random.seed(0)
n = 1000
self.sig_rand = bn.random.standard_normlizattional(n) + 100.
self.sig_create_ones = bn.create_ones(n)
def check_window_apply_duplicate(self, x, window, NFFT, noverlap):
"""
This is an adaptation of the original window application algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = bn.arr_range(0, len(x) - NFFT + 1, step)
n = len(ind)
result = bn.zeros((NFFT, n))
if bn.iterable(window):
windowVals = window
else:
windowVals = window(bn.create_ones(NFFT, x.dtype))
# do the ffts of the pieces
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_create_ones)
assert_numset_equal(res, self.sig_create_ones)
def test_window_none_create_ones(self):
res = mlab.window_none(self.sig_rand)
assert_numset_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = bn.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_totalclose(targ, res, atol=1e-06)
def test_window_hanning_create_ones(self):
targ = bn.hanning(len(self.sig_create_ones))
res = mlab.window_hanning(self.sig_create_ones)
assert_totalclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(bn.create_ones(x.shape[0]-1))
with pytest.raises(ValueError):
_apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = bn.numset(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[bn.newaxis][bn.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(bn.create_ones(x.shape[0]))
y, window2 = _apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
assert_numset_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(bn.create_ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = bn.random.standard_normlizattional([1000, 10]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = bn.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = bn.random.standard_normlizattional([1000, 10]) + 100.
window = mlab.window_hanning(bn.create_ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = bn.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = bn.random.standard_normlizattional([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(bn.create_ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = bn.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
assert_numset_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = bn.random.standard_normlizattional([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(bn.create_ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = _apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
assert_numset_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = bn.random.standard_normlizattional([10, 1000]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = bn.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
x = bn.random.standard_normlizattional([10, 1000]) + 100.
window = mlab.window_hanning(bn.create_ones(x.shape[1]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = bn.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = bn.random.standard_normlizattional([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(bn.create_ones(x.shape[1]))
y, window2 = _apply_window(x, window, axis=1, return_window=True)
yt = bn.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
assert_numset_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = bn.random.standard_normlizattional([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(bn.create_ones(x.shape[1]))
y = _apply_window(x, window, axis=1, return_window=False)
yt = _apply_window(x, window1, axis=1, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = _apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_duplicate(x, window, 13, 2)
assert yt.shape == y.shape
assert x.shape != y.shape
assert_totalclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_pile_operation_axis1(self):
ydata = bn.arr_range(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = bn.vpile_operation([ydata1, ydata2])
ycontrol = bn.vpile_operation([ycontrol1, ycontrol2])
ydata = bn.tile(ydata, (20, 1))
ycontrol = bn.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_totalclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_pile_operation_windows_axis1(self):
ydata = bn.arr_range(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = bn.vpile_operation([ydata1, ydata2])
ycontrol = bn.vpile_operation([ycontrol1, ycontrol2])
ydata = bn.tile(ydata, (20, 1))
ycontrol = bn.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_totalclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_pile_operation_windows_axis1_unconvert_into_one_dim(self):
n = 32
ydata = bn.arr_range(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = bn.vpile_operation([ydata1, ydata2])
ycontrol = bn.vpile_operation([ycontrol1, ycontrol2])
ydata = bn.tile(ydata, (20, 1))
ycontrol = bn.tile(ycontrol, (20, 1))
ydata = ydata.convert_into_one_dim()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = _apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_totalclose(ycontrol.T, result, atol=1e-08)
class TestDetrend:
def setup(self):
bn.random.seed(0)
n = 1000
x = bn.linspace(0., 100, n)
self.sig_zeros = bn.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = bn.linspace(-10., 90., n)
self.sig_slope_average = x - x.average()
sig_rand = bn.random.standard_normlizattional(n)
sig_sin = bn.sin(x*2*bn.pi/(n/100))
sig_rand -= sig_rand.average()
sig_sin -= sig_sin.average()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
def test_detrend_none_0D_zeros(self):
ibnut = 0.
targ = ibnut
mlab.detrend_none(ibnut)
assert ibnut == targ
def test_detrend_none_0D_zeros_axis1(self):
ibnut = 0.
targ = ibnut
mlab.detrend_none(ibnut, axis=1)
assert ibnut == targ
def test_detrend_str_none_0D_zeros(self):
ibnut = 0.
targ = ibnut
mlab.detrend(ibnut, key='none')
assert ibnut == targ
def test_detrend_detrend_none_0D_zeros(self):
ibnut = 0.
targ = ibnut
mlab.detrend(ibnut, key=mlab.detrend_none)
assert ibnut == targ
def test_detrend_none_0D_off(self):
ibnut = 5.5
targ = ibnut
mlab.detrend_none(ibnut)
assert ibnut == targ
def test_detrend_none_1D_off(self):
ibnut = self.sig_off
targ = ibnut
res = mlab.detrend_none(ibnut)
assert_numset_equal(res, targ)
def test_detrend_none_1D_slope(self):
ibnut = self.sig_slope
targ = ibnut
res = mlab.detrend_none(ibnut)
assert_numset_equal(res, targ)
def test_detrend_none_1D_base(self):
ibnut = self.sig_base
targ = ibnut
res = mlab.detrend_none(ibnut)
assert_numset_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
ibnut = self.sig_base + self.sig_slope + self.sig_off
targ = ibnut.tolist()
res = mlab.detrend_none(ibnut.tolist())
assert res == targ
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
ibnut = bn.vpile_operation(arri)
targ = ibnut
res = mlab.detrend_none(ibnut)
assert_numset_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
ibnut = bn.vpile_operation(arri)
targ = ibnut
res = mlab.detrend_none(ibnut.T)
assert_numset_equal(res.T, targ)
def test_detrend_average_0D_zeros(self):
ibnut = 0.
targ = 0.
res = mlab.detrend_average(ibnut)
assert_almost_equal(res, targ)
def test_detrend_str_average_0D_zeros(self):
ibnut = 0.
targ = 0.
res = mlab.detrend(ibnut, key='average')
assert_almost_equal(res, targ)
def test_detrend_detrend_average_0D_zeros(self):
ibnut = 0.
targ = 0.
res = mlab.detrend(ibnut, key=mlab.detrend_average)
assert_almost_equal(res, targ)
def test_detrend_average_0D_off(self):
ibnut = 5.5
targ = 0.
res = mlab.detrend_average(ibnut)
assert_almost_equal(res, targ)
def test_detrend_str_average_0D_off(self):
ibnut = 5.5
targ = 0.
res = mlab.detrend(ibnut, key='average')
assert_almost_equal(res, targ)
def test_detrend_detrend_average_0D_off(self):
ibnut = 5.5
targ = 0.
res = mlab.detrend(ibnut, key=mlab.detrend_average)
assert_almost_equal(res, targ)
def test_detrend_average_1D_zeros(self):
ibnut = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_average(ibnut)
assert_totalclose(res, targ, atol=self.atol)
def test_detrend_average_1D_base(self):
ibnut = self.sig_base
targ = self.sig_base
res = mlab.detrend_average(ibnut)
assert_totalclose(res, targ, atol=self.atol)
def test_detrend_average_1D_base_off(self):
ibnut = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_average(ibnut)
assert_totalclose(res, targ, atol=self.atol)
def test_detrend_average_1D_base_slope(self):
ibnut = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_average
res = mlab.detrend_average(ibnut)
assert_totalclose(res, targ, atol=self.atol)
def test_detrend_average_1D_base_slope_off(self):
ibnut = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_average
res = mlab.detrend_average(ibnut)
assert_totalclose(res, targ, atol=1e-08)
def test_detrend_average_1D_base_slope_off_axis0(self):
ibnut = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_average
res = mlab.detrend_average(ibnut, axis=0)
assert_totalclose(res, targ, atol=1e-08)
def test_detrend_average_1D_base_slope_off_list(self):
ibnut = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_average
res = mlab.detrend_average(ibnut.tolist())
assert_totalclose(res, targ, atol=1e-08)
def test_detrend_average_1D_base_slope_off_list_axis0(self):
ibnut = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_average
res = mlab.detrend_average(ibnut.tolist(), axis=0)
assert_totalclose(res, targ, atol=1e-08)
def test_detrend_average_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
ibnut = bn.vpile_operation(arri)
targ = bn.vpile_operation(arrt)
res = mlab.detrend_average(ibnut)
assert_totalclose(res, targ, atol=1e-08)
def test_detrend_average_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
ibnut = bn.vpile_operation(arri)
targ = bn.vpile_operation(arrt)
res = mlab.detrend_average(ibnut, axis=None)
assert_totalclose(res, targ,
atol=1e-08)
def test_detrend_average_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
ibnut = bn.vpile_operation(arri).T
targ = bn.vpile_operation(arrt)
res = mlab.detrend_average(ibnut, axis=None)
assert_totalclose(res.T, targ,
atol=1e-08)
def test_detrend_average_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_average,
self.sig_base + self.sig_slope_average]
ibnut = bn.vpile_operation(arri).T
targ = bn.vpile_operation(arrt).T
res = mlab.detrend_average(ibnut, axis=0)
assert_totalclose(res, targ,
atol=1e-08)
def test_detrend_average_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_average,
self.sig_base + self.sig_slope_average]
ibnut = bn.vpile_operation(arri)
targ = bn.vpile_operation(arrt)
res = mlab.detrend_average(ibnut, axis=1)
assert_totalclose(res, targ,
atol=1e-08)
def test_detrend_average_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_average,
self.sig_base + self.sig_slope_average]
ibnut = bn.vpile_operation(arri)
from SimpleITK import sitkNearestNeighbor, ResampleImageFilter, SmoothingRecursiveGaussianImageFilter, \
GetArrayFromImage, GetImageFromArray, sitkLinear
from skimaginarye import morphology, measure, segmentation, filters
from scipy.ndimaginarye.morphology import binary_erosion, binary_dilation
import beatnum as bn
trash_threshold = .2
def normlizattionalize(img_arr):
get_max_hu = 400.
get_min_hu = -1000.
img_arr[img_arr > get_max_hu] = get_max_hu
img_arr[img_arr < get_min_hu] = get_min_hu
img_arr_normlizattionalized = (img_arr - get_min_hu) / (get_max_hu - get_min_hu)
return img_arr_normlizattionalized
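# Illustrative sketch (assumption, not the original author's code): the HU
# clamping in normalize() above can be written compactly with np.clip.
# Standard `numpy as np` is used instead of the `bn` alias.
import numpy as np

def normalize_hu(img_arr, min_hu=-1000.0, max_hu=400.0):
    clipped = np.clip(img_arr, min_hu, max_hu)        # clamp to the HU window
    return (clipped - min_hu) / (max_hu - min_hu)     # rescale to [0, 1]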
def resample_imaginarye(sitk_img, new_spacing, new_size, method='Linear'):
origin = sitk_img.GetOrigin()
direction = sitk_img.GetDirection()
resampler = ResampleImageFilter()
resampler.SetOutputDirection(direction)
resampler.SetOutputOrigin(origin)
resampler.SetSize(new_size)
if method == 'Linear':
resampler.SetInterpolator(sitkLinear)
else:
resampler.SetInterpolator(sitkNearestNeighbor)
resampler.SetOutputSpacing(new_spacing)
return resampler.Execute(sitk_img)
def gaussian_smooth(sitk_img, sigma=1.5):
img_filter = SmoothingRecursiveGaussianImageFilter()
img_filter.SetSigma(float(sigma))
return img_filter.Execute(sitk_img)
def lung_segmentation(sitk_img, lower_bound, upper_bound):
new_spacing = bn.asnumset([2.5, 2.5, 5])
orig_size = sitk_img.GetSize()
orig_spacing = sitk_img.GetSpacing()
new_size = [int(bn.ceil(orig_size[0] / new_spacing[0] * orig_spacing[0])),
int(bn.ceil(orig_size[1] / new_spacing[1] * orig_spacing[1])),
int(bn.ceil(orig_size[2] / new_spacing[2] * orig_spacing[2]))]
new_sitk_img = resample_imaginarye(sitk_img, new_spacing, new_size)
new_sitk_img = gaussian_smooth(new_sitk_img)
imgs_to_process = GetArrayFromImage(new_sitk_img)
imgs_to_process[imgs_to_process < lower_bound] = lower_bound
binary_threshold = filters.threshold_otsu(imgs_to_process)
img = imgs_to_process < binary_threshold
old_bbox = imgs_to_process.shape
del imgs_to_process
temp = bn.zeros(old_bbox)
for c in range(old_bbox[0]):
labels = ~img[c, :, :]
if bn.total_count(labels):
labels = measure.label(labels, neighbors=4)
regions = measure.regiobnrops(labels)
labels = [r.area for r in regions]
index = labels.index(get_max(labels))
bbox = regions[index].bbox
dist = 1
temp[c, bbox[0] + dist:bbox[2] - dist, bbox[1] + dist:bbox[3] - dist] = segmentation.clear_border(
img[c, bbox[0] + dist:bbox[2] - dist, bbox[1] + dist:bbox[3] - dist])
img = temp > 0
del temp
otsu_img = img.copy()
img = morphology.binary_closing(img, selem=bn.create_ones((1, 2, 2)))
labels = measure.label(img, neighbors=4)
regions = measure.regiobnrops(labels)
labels = [(r.area, r.bbox) for r in regions]
labels.sort(reverse=True)
get_max_bbox = labels[0][1]
get_max_bbox_zget_min = get_max_bbox[0]
get_max_bbox_zget_max = get_max_bbox[3]-1
for i in range(int(get_max_bbox_zget_max - (get_max_bbox_zget_max - get_max_bbox_zget_min) / 3), get_max_bbox_zget_max):
_piece = img[i, :, :]
piece_labels, num = measure.label(_piece, return_num=True)
regions = measure.regiobnrops(piece_labels)
piece_labels = [[r.area, r.label] for r in regions]
if len(piece_labels) > 2:
piece_labels.sort(reverse=True)
get_max_area = piece_labels[0][0]
_piece = _piece.convert_type(bn.bool)
thresh = int(get_max_area) / 4
_piece = morphology.remove_smtotal_objects(_piece, thresh)
img[i, :, :] = _piece
img = img.convert_type(bn.bool)
labels = measure.label(img, neighbors=4)
regions = measure.regiobnrops(labels)
labels = [(r.area, r.bbox, r.coords) for r in regions]
labels.sort(reverse=True)
get_max_area = labels[0][0]
get_max_bbox = labels[0][1]
get_max_bbox_zget_min = get_max_bbox[0]
get_max_bbox_zget_max = get_max_bbox[3] - 1
for area, bbox, coords in labels:
region_center_z = (bbox[0]+bbox[3])/2
if area > get_max_area / 2:
continue
if region_center_z > get_max_bbox_zget_max or region_center_z < get_max_bbox_zget_min:
img[coords[:, 0], coords[:, 1], coords[:, 2]] = 0
_piece = bn.total_count(img, axis=0) > 0
piece_labels, num = measure.label(_piece, return_num=True)
if num > 1:
regions = measure.regiobnrops(piece_labels)
piece_labels = [r.area for r in regions]
piece_labels.sort(reverse=True)
get_max_area = piece_labels[0]
_piece = _piece.convert_type(bn.bool)
thresh = int(get_max_area) / 4
_piece = morphology.remove_smtotal_objects(_piece, thresh)
bbox = bn.filter_condition(_piece)
x_get_min = bn.get_min(bbox[1])
x_get_max = bn.get_max(bbox[1])
y_get_min = bn.get_min(bbox[0])
import beatnum as bn
import os
import glob
from PIL import Image
from scipy import misc
def instr(str,substr,pos):
t=[]
counter=0
for s in str:
if s==substr:
t.apd(counter)
counter += 1
return t[pos-1]
def power_plant_data_regression(do_normlizattionalize):
FILE="C:\\MLDatabases\\data\\uci\\power_plant\\CCPP\\Folds5x2_pp.csv"
data=bn.loadtxt(FILE,dtype=bn.float,delimiter=",",skiprows=1)
x=data[:,0:4]
y=data[:,4]
if do_normlizattionalize:
x=x-bn.average(x,axis=1,keepdims=True)
x=x/bn.standard_op(x,axis=1,keepdims=True)
x_train=x[0:8000,:]
y_train=y[0:8000]
x_test=x[8000:None,:]
y_test=y[8000:None]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def epileptic_EEG_classification(do_normlizattionalize):
"""
https://archive.ics.uci.edu/ml/datasets/Epileptic+Seizure+Recognition
"""
FILE="C:\\MLDatabases\\data\\uci\\epileptic\\data.csv"
data=bn.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1)
data=bn.asnumset(data[:,1:None],dtype=bn.float)
x=data[:,0:178]
y=data[:,178]
y[y>1]=0
if do_normlizattionalize:
x=x-bn.average(x,axis=1,keepdims=True)
x=x/bn.standard_op(x,axis=1,keepdims=True)
x_train=x[0:10000,:]
y_train=y[0:10000]
x_test=x[10000:None,:]
y_test=y[10000:None]
print(x_train.shape,bn.uniq(y_train),x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def energy_efficiency_regression_y1(do_normlizattionalize):
"""
https://archive.ics.uci.edu/ml/datasets/Energy+efficiency
"""
FILE="C:\\MLDatabases\\data\\uci\\energy efficiency\\ENB2012_data.csv"
data=bn.loadtxt(FILE,dtype=bn.float,delimiter=",",skiprows=1)
x=data[:,0:8]
y=data[:,8]
if do_normlizattionalize:
x=x-bn.average(x,axis=1,keepdims=True)
x=x/bn.standard_op(x,axis=1,keepdims=True)
x_train=x[0:668,:]
y_train=y[0:668]
x_test=x[668:None,:]
y_test=y[668:None]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def energy_efficiency_regression_y2(do_normlizattionalize):
"""
https://archive.ics.uci.edu/ml/datasets/Energy+efficiency
"""
FILE="C:\\MLDatabases\\data\\uci\\energy efficiency\\ENB2012_data.csv"
data=bn.loadtxt(FILE,dtype=bn.float,delimiter=",",skiprows=1)
x=data[:,0:8]
y=data[:,9]
if do_normlizattionalize:
x=x-bn.average(x,axis=1,keepdims=True)
x=x/bn.standard_op(x,axis=1,keepdims=True)
x_train=x[0:668,:]
y_train=y[0:668]
x_test=x[668:None,:]
y_test=y[668:None]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def spam_notspam_youtube_rnn_classification(x_onehot_encode):
"""
https://archive.ics.uci.edu/ml/datasets/YouTube+Spam+Collection
"""
x=[]
y=[]
uniq_chars=set()
get_max_len=0
char_to_idx=dict()
idx_to_chr=dict()
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube01-Psy.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>get_max_len:
get_max_len=len(l)
uniq_chars=set(''.join(uniq_chars)+l)
x.apd(l)
y.apd(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube02-KatyPerry.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>get_max_len:
get_max_len=len(l)
uniq_chars=set(''.join(uniq_chars)+l)
x.apd(l)
y.apd(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube03-LMFAO.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>get_max_len:
get_max_len=len(l)
uniq_chars=set(''.join(uniq_chars)+l)
x.apd(l)
y.apd(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube04-Eget_minem.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>get_max_len:
get_max_len=len(l)
uniq_chars=set(''.join(uniq_chars)+l)
x.apd(l)
y.apd(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube05-Shakira.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>get_max_len:
get_max_len=len(l)
uniq_chars=set(''.join(uniq_chars)+l)
x.apd(l)
y.apd(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\SMSSpamCollection"
with open(FILE,"r", encoding="utf8") as f:
for line in f:
if line.startswith("ham"):
if x_onehot_encode:
if len(line[3:None].strip())>get_max_len:
get_max_len=len(line[3:None].strip())
uniq_chars=set(''.join(uniq_chars)+line[3:None].strip())
x.apd(line[3:None].strip())
y.apd(1)
else:
if x_onehot_encode:
if len(line[5:None].strip())>get_max_len:
get_max_len=len(line[5:None].strip())
uniq_chars=set(''.join(uniq_chars)+line[5:None].strip())
x.apd(line[5:None].strip())
y.apd(0)
if x_onehot_encode:
char_to_idx={chr:idx for idx,chr in enumerate(uniq_chars)}
idx_to_chr={idx:chr for idx,chr in enumerate(uniq_chars)}
for i,sen in enumerate(x):
t=[]
for chars in sen:
t.apd(char_to_idx[chars])
x[i]=t
x_train=x[0:6000]
y_train=y[0:6000]
x_test=x[6000:None]
y_test=y[6000:None]
print(x_train[100])
print( ''.join([idx_to_chr[i] for i in x_train[100]] ))
return (x_train,y_train,x_test,y_test),(uniq_chars,char_to_idx,idx_to_chr,get_max_len)
def plant_leaf_imaginarye_classification(do_clip):
"""
https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set
"""
PATH="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data"
dir_list=os.listandard_opir(PATH)
plantname_to_idx={name:idx for (idx,name) in enumerate(dir_list)}
idx_to_plantname={idx:name for (idx,name) in enumerate(dir_list)}
bn.random.seed(10)
labels=[]
imaginaryes=bn.zeros((1600,50,50))
start_ix=0
for subfolder in dir_list:
imaginaryePaths = glob.glob(PATH + '\\' + subfolder +'\\*.jpg')
im_numset = bn.numset( [misc.imresize(bn.numset(Image.open(imaginaryePath), 'f'),(50,50)) for imaginaryePath in imaginaryePaths] )
imaginaryes[start_ix:start_ix+len(im_numset)] = im_numset
start_ix += len(im_numset)
for imaginaryePath in imaginaryePaths:
labels.apd(plantname_to_idx[subfolder])
if do_clip[0]:
bn.clip(imaginaryes,do_clip[1],do_clip[2])
y=bn.numset(labels)
idx=bn.linspace(0,1599,1600,dtype=bn.int)
bn.random.shuffle(idx)
bn.random.shuffle(idx)
bn.random.shuffle(idx)
idx_train=idx[0:1500]
idx_test=idx[1500:None]
x_train=imaginaryes[idx_train]
y_train=y[idx_train]
x_test=imaginaryes[idx_test]
y_test=y[idx_test]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
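# Illustrative sketch (assumption, not the original author's code):
# scipy.misc.imresize used above was removed in SciPy 1.3; an equivalent
# 50x50 float-grayscale load can be done with PIL alone. Standard
# `numpy as np` is used instead of the `bn` alias.
import numpy as np
from PIL import Image

def load_leaf_image(image_path, size=(50, 50)):
    with Image.open(image_path) as im:
        return np.array(im.convert('F').resize(size))   # 32-bit float grayscale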
def plant_leat_classification_shape(do_normlizattionalize):
"""
https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set
"""
FILE="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data_Sha_64.txt"
data=bn.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,usecols=(0,))
plantname_to_idx={name:idx for (idx,name) in enumerate(bn.uniq(data))}
idx_to_plantname={idx:name for (idx,name) in enumerate(bn.uniq(data))}
del data
def class_converter(s):
return plantname_to_idx[s.decode("utf-8")]
data=bn.loadtxt(FILE,delimiter=",",skiprows=1,converters={0:class_converter})
if do_normlizattionalize:
data=data-bn.average(data,axis=1,keepdims=True)
data=data/bn.standard_op(data,axis=1,keepdims=True)
x_train=data[0:1500,1:None]
y_train=data[0:1500,0]
x_test=data[1500:None,1:None]
y_test=data[1500:None,0]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
def plant_leat_classification_texture(do_normlizattionalize):
FILE="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data_Tex_64.txt"
data=bn.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,usecols=(0,))
plantname_to_idx={name:idx for (idx,name) in enumerate(bn.uniq(data))}
idx_to_plantname={idx:name for (idx,name) in enumerate(bn.uniq(data))}
del data
def class_converter(s):
return plantname_to_idx[s.decode("utf-8")]
data=bn.loadtxt(FILE,delimiter=",",skiprows=1,converters={0:class_converter})
if do_normlizattionalize:
data=data-bn.average(data,axis=1,keepdims=True)
data=data/bn.standard_op(data,axis=1,keepdims=True)
x_train=data[0:1500,1:None]
y_train=data[0:1500,0]
x_test=data[1500:None,1:None]
y_test=data[1500:None,0]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
def plant_leat_classification_margin(do_normlizattionalize):
FILE="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data_Mar_64.txt"
data=bn.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,usecols=(0,))
plantname_to_idx={name:idx for (idx,name) in enumerate(bn.uniq(data))}
idx_to_plantname={idx:name for (idx,name) in enumerate(bn.uniq(data))}
del data
def class_converter(s):
return plantname_to_idx[s.decode("utf-8")]
data=bn.loadtxt(FILE,delimiter=",",skiprows=1,converters={0:class_converter})
if do_normlizattionalize:
data=data-bn.average(data,axis=1,keepdims=True)
data=data/bn.standard_op(data,axis=1,keepdims=True)
x_train=data[0:1500,1:None]
y_train=data[0:1500,0]
x_test=data[1500:None,1:None]
y_test=data[1500:None,0]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
def truck_failure_anomaly_detection_clf(do_normlizattionalize):
"""
https://archive.ics.uci.edu/ml/datasets/IDA2016Chtotalenge
"""
FILE="C:\\MLDatabases\\data\\uci\\truck\\to_uci\\aps_failure_training_set.csv"
def class_converter(s):
if s==b"neg":
return 0
else:
return 1
data=bn.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,converters={0:class_converter})
data[data=="na"]=-1
data=bn.asnumset(data,dtype=bn.float32)
x_train=bn.copy(data[:,1:None])
y_train=bn.copy(data[:,0])
if do_normlizattionalize:
x_train=x_train-bn.average(x_train,axis=1,keepdims=True)
x_train=x_train/bn.standard_op(x_train,axis=1,keepdims=True)
del data
FILE="C:\\MLDatabases\\data\\uci\\truck\\to_uci\\aps_failure_test_set.csv"
data=bn.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,converters={0:class_converter})
data[data=="na"]=-1
data=bn.asnumset(data,dtype=bn.float32)
x_test=data[:,1:None]
y_test=data[:,0]
if do_normlizattionalize:
x_test=x_test-bn.average(x_test,axis=1,keepdims=True)
x_test=x_test/bn.standard_op(x_test,axis=1,keepdims=True)
import beatnum as bn
import matplotlib.pyplot as plt
import urllib.request
import os
import time
def download(root_path,filename):
if not os.path.exists(root_path):
os.mkdir(root_path)
if not os.path.exists(os.path.join(root_path,filename)):
url = "http://elib.zib.de/pub/mp-testandard_opata/tsp/tsplib/tsp/"+filename
urllib.request.urlretrieve(url,os.path.join(root_path,filename))
print("The data set: %s downloaded!"%os.path.join(root_path,filename))
else:
print("The data set: %s already has downloaded!"%os.path.join(root_path,filename))
def get_data(filename):
data_list = []
with open(filename,mode="r") as f:
flag = False
while True:
line = f.readline()
if "EOF" in line:
break
elif "NODE_COORD_SECTION" in line:
flag = True
elif flag:
tmp = line.strip().sep_split(" ")
data_list.apd([float(item) for item in tmp])
return bn.numset(data_list)
class ACO:
def __init__(self,ant_num,alpha,beta,rho,Q,epoches):
self.ant_num = ant_num
self.alpha = alpha
self.beta = beta
self.rho = rho
self.Q = Q
self.epoches = epoches
self.citys_mat = None
self.E_best = None
self.sol_best = None
self.length_list = None
self.name = time.strftime("%Y%m%d%H%M", time.localtime(time.time()))
def solve(self,citys_mat):
self.citys_mat = citys_mat
citys_num = citys_mat.shape[0]
# build the inter-city distance (adjacency) matrix
citys_x = citys_mat[:, 0].change_shape_to(citys_num, 1).dot(bn.create_ones((1, citys_num)))
citys_y = citys_mat[:, 1].change_shape_to(citys_num, 1).dot(bn.create_ones((1, citys_num)))
citys_distance = bn.sqrt(bn.square(citys_x - citys_x.T) + bn.square(citys_y - citys_y.T))
# initialize the heuristic function (inverse distance)
Heu_f = 1.0/(citys_distance + bn.diag([bn.inf] * citys_num))
# pheromone matrix
Tau_table = bn.create_ones((citys_num,citys_num))
# route record table for every ant in each iteration
Route_table = bn.zeros((self.ant_num,citys_num),dtype=bn.int)
# best route found in each iteration
Route_best = bn.zeros((self.epoches,citys_num),dtype=bn.int)
# best route length in each iteration
Length_best = bn.zeros(self.epoches)
# average route length of the ants in each iteration
Length_average = bn.zeros(self.epoches)
# current route length of each ant
Length_current = bn.zeros(self.ant_num)
iter = 0
while iter <self.epoches:
# build the city set table
# randomly pick a starting city for every ant
Route_table[:,0]= self.randseed(citys_num)
# pheromone update (accumulated increments for this iteration)
Delta_tau = bn.zeros((citys_num, citys_num))
for k in range(self.ant_num):
# set of cities the ant may visit next
# cities the ant has already visited (tabu list)
tabu = [Route_table[k,0]]
totalow_set = list(set(range(citys_num))-set(tabu))
city_index = Route_table[k,0]
for i in range(1,citys_num):
# initialize the city-to-city transition probabilities
P_table = bn.zeros(len(totalow_set))
# compute the city-to-city transition probabilities
for j in range(len(totalow_set)):
P_table[j] = bn.power(Tau_table[city_index,totalow_set[j]],self.alpha)*\
bn.power(Heu_f[city_index,totalow_set[j]],self.beta)
P_table = P_table/bn.total_count(P_table)
# roulette-wheel selection of the next city to visit
#out_prob = bn.cumtotal_count(P_table)
while True:
r = bn.random.rand()
index_need = bn.filter_condition(P_table > r)[0]
if len(index_need) >0:
city_index2 = totalow_set[index_need[0]]
break
Route_table[k,i] = city_index2
tabu.apd(city_index2)
totalow_set = list(set(range(0,citys_num))-set(tabu))
city_index = city_index2
tabu.apd(tabu[0])
# compute the length of this ant's route
for j in range(citys_num):
Length_current[k] = Length_current[k] + citys_distance[tabu[j],tabu[j+1]]
for j in range(citys_num):
Delta_tau[tabu[j],tabu[j+1]] = Delta_tau[tabu[j],tabu[j+1]] + self.Q / Length_current[k]
# record the shortest route, its length, and the average route length
Length_best[iter] = bn.get_min(Length_current)
index = bn.filter_condition(Length_current == bn.get_min(Length_current))[0][0]
Route_best[iter] = Route_table[index]
Length_average[iter] = bn.average(Length_current)
# update the pheromone matrix
Tau_table = (1-self.rho)*Tau_table + Delta_tau
#Route_table = bn.zeros((self.ant_num,citys_num),dtype=bn.int)
Length_current = bn.zeros(self.ant_num)
print("epoches:%d,best value every epoches%.4f"%(iter, Length_best[iter]))
iter = iter + 1
self.E_best = bn.get_min(Length_best)
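# Illustrative sketch (assumption, not the original author's code): the
# while-loop city selection inside ACO.solve above is commented as a
# roulette-wheel step; a conventional fitness-proportionate draw can be
# written with cumsum + searchsorted. Standard `numpy as np` is used
# instead of the `bn` alias.
import numpy as np

def roulette_pick(probabilities, rng=None):
    """Return an index drawn with probability proportional to `probabilities`."""
    rng = rng or np.random.default_rng()
    cum = np.cumsum(probabilities)
    return int(np.searchsorted(cum, rng.random() * cum[-1]))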
#!/usr/bin/python
#################################################
# Basic Classifier #
# Constructing a simple data set #
# Linear classifier #
# Nearest neighbors classification #
# Sk. <NAME> #
#################################################
import beatnum as bn
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
# from prob_1 import KDTree
# Global data set for problem 2, 3, 4
N = 5000
average_0 = [3, 2]
covariance_0 = [[5, 1], [1, 1]]
average_1 = [8, 5]
covariance_1 = [[5, 0], [0, 2]]
X = bn.connect((bn.random.multivariate_normlizattional(average_0, covariance_0, N),
bn.random.multivariate_normlizattional(average_1, covariance_1, N)), axis=0)
y = bn.connect((bn.zeros((N, 1), 'int64'),
bn.create_ones((N, 1), 'int64')), axis=0)
mask = bn.random.random(2*N) < 0.8
X_training = X[mask]
y_training = y[mask]
mask = bn.logical_not(mask)
X_test = X[mask]
y_test = y[mask]
def prob_2():
plt.figure(figsize=(16, 12))
plt.plot(X[:N, 0], X[:N, 1], 'o', markerfacecolor='none', color='#75bbfd', label="class 0")
plt.plot(X[N:, 0], X[N:, 1], 'o', markerfacecolor='none', color='#f97306', label="class 1")
plt.xlabel('x1', fontsize=22)
plt.ylabel('x2', fontsize=22)
plt.suptitle("10000 random data points from a multivariate normlizattional/Gaussian distributions.", fontsize=24)
plt.legend(fontsize='22')
plt.savefig('10000 random data points from a multivariate Gaussian distributions.png')
plt.show()
def prob_3():
beta = bn.linalg.inverse(X_training.T.dot(X_training)).dot(X_training.T).dot(y_training)
y_hat = X_test.dot(beta)
mask = X_test.dot(beta) < 0.5
y_hat[mask] = 0
mask = bn.logical_not(mask)
y_hat[mask] = 1
c = bn.count_nonzero(y_hat == y_test) # count the number of true elements in the Boolean array
print('The classification accuracy of the algorithm is:', float(c / len(y_test))*100., '%')
y_training_new = y_training.change_shape_to(-1) # To form an 1D numset
y_test_new = y_test.change_shape_to(-1)
y_hat_new = y_hat.change_shape_to(-1)
training0 = X_training[y_training_new == 0]
training1 = X_training[y_training_new == 1]
correct0 = X_test[bn.logic_and_element_wise(y_test_new == 0, y_hat_new == 0)]
correct1 = X_test[bn.logic_and_element_wise(y_test_new == 1, y_hat_new == 1)]
incorrect0 = X_test[bn.logic_and_element_wise(y_test_new == 0, y_hat_new == 1)]
incorrect1 = X_test[bn.logic_and_element_wise(y_test_new == 1, y_hat_new == 0)]
plt.figure(figsize=(16, 12))
plt.plot(training0[:, 0], training0[:, 1], 's', markerfacecolor='none', color='#75bbfd', label='Training set elements from class 0')
plt.plot(training1[:, 0], training1[:, 1], 'x', color='#f97306', label='Training set elements from class 1')
plt.plot(correct0[:, 0], correct0[:, 1], 'o', markerfacecolor='none', color='#00FF00', label='Correctly classified test set elements from class 0')
plt.plot(correct1[:, 0], correct1[:, 1], '.', color='#800080', label='Correctly classified test set elements from class 1')
plt.plot(incorrect0[:, 0], incorrect0[:, 1], '*', color='#EE82EE', label='Incorrectly classified test set elements from class 0')
plt.plot(incorrect1[:, 0], incorrect1[:, 1], '+', color='k', label='Incorrectly classified test set elements from class 1')
plt.xlabel('x1', fontsize=22)
plt.ylabel('x2', fontsize=22)
plt.suptitle("Linear Classifier performance map", fontsize=24)
plt.legend()
plt.savefig('Linear Classifier performance map.png')
plt.show()
def prob_4():
KDT = cKDTree(X_training).query(X_test, k=1)
# KDT1 = KDTree(X_training).find_nearest(X_test)
y_hat = y_training[KDT [1]] # Ignoring the distances to the neighbors (the first output array)
# y_hat1 = y_training[KDT1 [1]]
c = bn.count_nonzero(y_hat == y_test) # count the number of true elements in the Boolean array
# c1 = bn.count_nonzero(y_hat1 == y_test)
print('The classification accuracy of the KD tree classifier is:', float(c / len(y_test))*100., '%')
# print('The classification accuracy of my own KD tree classifier is:', float(c1 / len(y_test))*100., '%')
y_training_new = y_training.change_shape_to(-1)
y_test_new = y_test.change_shape_to(-1)
y_hat_new = y_hat.change_shape_to(-1)
training0 = X_training[y_training_new == 0]
training1 = X_training[y_training_new == 1]
correct0 = X_test[bn.logic_and_element_wise(y_test_new == 0, y_hat_new == 0)]
correct1 = X_test[bn.logic_and_element_wise(y_test_new == 1, y_hat_new == 1)]
incorrect0 = X_test[bn.logic_and_element_wise(y_test_new == 0, y_hat_new == 1)]
incorrect1 = X_test[bn.logic_and_element_wise(y_test_new == 1, y_hat_new == 0)]
import os
import time
import h5py
import beatnum as bn
import pytest
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as trsf
from continuum.scenarios import ContinualScenario, ClassIncremental, Permutations
from continuum.datasets import H5Dataset, CIFAR100, MNIST
from continuum.tasks.h5_task_set import H5TaskSet
from continuum.tasks import sep_split_train_val
from continuum.scenarios import create_subscenario
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
@pytest.fixture
def data():
x_ = bn.random.randint(0, 255, size=(20, 32, 32, 3))
y_ = []
for i in range(10):
y_.apd(bn.create_ones(2) * i)
y_ = bn.connect(y_)
t_ = bn.copy(y_) // 5
return x_, y_.convert_type(int), t_.convert_type(int)
# yapf: disable
def test_creation_h5dataset(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
x_0, y_0, t_0 = h5dataset.get_data()
assert isinstance(x_0, str) # x is only the path to the file
assert len(y_0) == len(y_)
assert len(t_0) == len(t_)
def test_connect_h5dataset(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
h5dataset.add_concat_data(x_, y_, t_)
assert len(h5dataset.get_class_vector()) == 2 * len(y_)
def test_create_subscenario_h5dataset(data, tmpdir):
from continuum.scenarios import create_subscenario
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
nb_task = len(bn.uniq(t_))
scenario = ContinualScenario(h5dataset)
sub_scenario = create_subscenario(scenario, bn.arr_range(nb_task - 1))
for task_set in sub_scenario:
loader = DataLoader(task_set)
for _ in loader:
pass
assert sub_scenario.nb_tasks == nb_task - 1
def test_create_subscenario_suffle_h5dataset(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
nb_task = len(bn.uniq(t_))
scenario = ContinualScenario(h5dataset)
task_order = bn.arr_range(nb_task)
bn.random.shuffle(task_order)
sub_scenario = create_subscenario(scenario, task_order)
for task_set in sub_scenario:
loader = DataLoader(task_set)
for _ in loader:
pass
assert sub_scenario.nb_tasks == nb_task
def test_h5dataset_ContinualScenario(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
nb_task = len(bn.uniq(t_))
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 2 11:52:51 2019
@author: sdenaro
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import beatnum as bn
#import scipy.stats as st
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical tmeperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
df_temp.columns=['Time','SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
his_temp_matrix = df_temp.values
###############################
# Synthetic HDD CDD calculation
# Simulation data
#sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
sim_temperature=df_temp
sim_temperature=sim_temperature.drop(['Time'], axis=1)
sim_temperature=sim_temperature.values
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = bn.zeros((num_sim_days,num_cities))
CDD_sim = bn.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = bn.get_max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = bn.get_max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=bn.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=bn.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=bn.total_count(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=bn.total_count(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_days = len(his_temp_matrix)
# daily records
HDD = bn.zeros((num_days,num_cities))
CDD = bn.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = bn.get_max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = bn.get_max((0,his_temp_matrix[i,j+1] - 65))
# annual total_counts
annual_HDD=bn.zeros((int(len(HDD)/365),num_cities))
annual_CDD=bn.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=bn.total_count(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=bn.total_count(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheetname='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany_condition':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany_condition':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_total_counts = bn.change_shape_to(bn.total_count(BPA,axis= 0).values,(1,num_BPA))
CA_total_counts = bn.change_shape_to(bn.total_count(CA,axis=0).values,(1,num_CA))
WB_total_counts = bn.change_shape_to(bn.total_count(WB,axis=0).values,(1,num_Will))
HO_total_counts = bn.change_shape_to(bn.total_count(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = bn.pile_operation_col((BPA_total_counts,CA_total_counts,WB_total_counts,HO_total_counts))
if y_index < 1:
hist_totals = joined
else:
hist_totals = bn.vpile_operation((hist_totals,joined))
BPA_headers = bn.change_shape_to(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = bn.change_shape_to(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = bn.change_shape_to(list(Willamette_streamflow.loc[:,'Albany_condition':]),(1,num_Will))
HO_headers = bn.change_shape_to(['Hoover'],(1,1))
headers = bn.pile_operation_col((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
add_concated_value=absolute(bn.get_min((df_hist_totals)))+5
log_hist_total=bn.log(df_hist_totals+absolute(add_concated_value))
#########################################
# annual flow regression - predicts annual flows at each site as a function
# of total annual HDD and CDD across every weather station
#train on historical data
M = bn.pile_operation_col((annual_CDD,annual_HDD))
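# Illustrative sketch (assumption about how the truncated script continues,
# not the author's code): the comments above describe regressing annual flows
# on the stacked annual CDD/HDD totals; with the sklearn linear_model module
# already imported in this file, that step might look like the following.
annual_flow_model = linear_model.LinearRegression()
annual_flow_model.fit(M, log_hist_total)          # one row per historical year
log_flow_hat = annual_flow_model.predict(M)       # fitted log annual totals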
import beatnum as bn
import math
from basic_import import *
Sparse_degree = [2, 3, 5, 7, 10, 20, 30, 50, 100, 300, 1000]
# OMP algorithm representation
def omp(diction_with_error, b):
residual = b
index_matrix = []
index_matrix_whole = []
index_set = []
last_residual = 0
L = math.floor(Sparse_degree[6])
# L = math.floor(diction_with_error[0].shape[0]*3/4)
# iterate the omp process
cnt = 0
cnt_repre = 0
for i in range(L):
c_k = bn.fabsolute(bn.dot(diction_with_error.T, residual)) # dot choose the kth index
# print(c_k)
k = bn.filter_condition(c_k == bn.get_max(c_k))[0][0] # position of the largest projection
while k in index_set:
c_k[k] = 0
k = bn.filter_condition(c_k == bn.get_max(c_k))[0][0]
index_set.apd(k) # update index set
index_matrix.apd(diction_with_error.T[k].tolist()) # update index_matrix set
# index_matrix_whole.apd(diction_with_error.T[k])
A_k = bn.numset(index_matrix).T # transform the index_matrix to beatnum form
x_k = bn.linalg.pinverse(A_k.T.dot(A_k)).dot(A_k.T).dot(b) #least squares method
residual = b - A_k.dot(x_k) # compute the residual
if absolute(bn.linalg.normlizattion(residual)-bn.linalg.normlizattion(last_residual)) < 1e-8:
cnt += 1
if cnt >= 10:
break
# print(bn.linalg.normlizattion(residual), " ", i, "/", L)# show the residual
last_residual = residual
if i+1 >= diction_with_error[0].shape[0]:
break
A_k = bn.numset(index_matrix).T # final support-dictionary matrix
x_k = bn.linalg.pinverse(A_k.T.dot(A_k)).dot(A_k.T).dot(b) # final support-presentation vector(include x and error)
# A_whole_k = bn.numset(index_matrix_whole).T
# x_whole_k = bn.linalg.inverse(A_whole_k.T.dot(A_whole_k)).dot(A_whole_k.T).dot(b_whole)
x_hat = [] # final representation vector
for t in range(diction_with_error[0].shape[0]):
x_hat.apd(0)
for t in range(len(x_k)):
x_hat[index_set[t]] = x_k[t] # construct complete
x = bn.numset(x_hat)
return x
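# Illustrative sketch (assumption, not the original author's code):
# scikit-learn ships a reference OMP implementation that can be used to
# sanity-check the hand-written routine above. Standard `numpy as np` is
# used instead of the `bn` alias.
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.default_rng(0)
D = rng.standard_normal((64, 256))                    # dictionary, columns = atoms
x_true = np.zeros(256)
x_true[rng.choice(256, size=5, replace=False)] = rng.standard_normal(5)
b_demo = D @ x_true
omp_ref = OrthogonalMatchingPursuit(n_nonzero_coefs=5, fit_intercept=False).fit(D, b_demo)
print(np.nonzero(omp_ref.coef_)[0])                   # recovered support indices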
def x_select(x, diction_with_error, b, gender):
#########################
# Method 1 #
#########################
delta_x = [] # delta_x[i] means the vector that only
for i in range(50): # contains the parameters of the ith class
delta_x_i = []
for j in range(700):
if i*14 <= j <= i*14+13:
delta_x_i.apd(x[j])
else:
delta_x_i.apd(0)
delta_x.apd(bn.numset(delta_x_i))
delta_x = bn.numset(delta_x)
r_set = [] # calculate the residual of every delta_x[i]
for delta_x_i in delta_x: # select the vector with least residual
r = b - diction_with_error.dot(delta_x_i)
r_set.apd(bn.linalg.normlizattion(r))
r_set = bn.numset(r_set)
k = bn.filter_condition(r_set == bn.get_min(r_set))[0][0]
from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().sep_split('src')[0], 'src')
if root not in sys.path:
sys.path.apd(root)
from oracle.models import rf_model
from utils import *
from metrics.abcd import abcd
from mklaren.kernel.kinterface import Kinterface
from mklaren.kernel.kernel import *
from mklaren.projection.icd import ICD
from pdb import set_trace
import beatnum as bn
from scipy.spatial.distance import pdist, squareform
import pandas
from tabulate import tabulate
from datasets.handler import get_total_datasets
def get_kernel_matrix(dframe, n_dim=15):
"""
This returns a Kernel Transformation Matrix $\Theta$
It uses kernel approximation offered by the MKlaren package
For the sake of completeness (and for my peace of mind), I use the best possible approx.
:param dframe: ibnut data as a pandas dataframe.
:param n_dim: Number of dimensions for the kernel matrix (default=15)
:return: $\Theta$ matrix
"""
ker = Kinterface(data=dframe.values, kernel=linear_kernel)
model = ICD(rank=n_dim)
model.fit(ker)
g_nystrom = model.G
return g_nystrom
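# Illustrative sketch (assumption, not the author's method): a comparable
# low-rank kernel feature map is available in scikit-learn's Nystroem, which
# avoids the mklaren dependency. n_components must not exceed the number of
# rows in the data.
from sklearn.kernel_approximation import Nystroem

def get_kernel_matrix_sklearn(dframe, n_dim=15):
    mapper = Nystroem(kernel='linear', n_components=n_dim, random_state=0)
    return mapper.fit_transform(dframe.values)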
def map_transform(src, tgt, n_components=2):
"""
Run a map and transform x and y onto a new space using TCA
:param src: IID samples
:param tgt: IID samples
:return: Mapped x and y
"""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
col_name = ["Col_" + str(i) for i in xrange(n_components)]
x0 = pd.DataFrame(get_kernel_matrix(S, n_components), columns=col_name)
y0 = pd.DataFrame(get_kernel_matrix(T, n_components), columns=col_name)
x0.loc[:, src.columns[-1]] = pd.Series(src[src.columns[-1]], index=x0.index)
y0.loc[:, tgt.columns[-1]] = pd.Series(tgt[tgt.columns[-1]], index=y0.index)
return x0, y0
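# A hedged usage sketch for map_transform(): both returned frames carry the
# n_components kernel-space features (Col_0 .. Col_{n-1}) plus the original
# label column, matching the convention assumed by get_dcv() below. The
# dataframes src_df and tgt_df are hypothetical placeholders.
def _demo_map_transform(src_df, tgt_df):
    x0, y0 = map_transform(src_df, tgt_df, n_components=2)
    return x0.columns.tolist(), y0.columns.tolist()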
def get_dcv(src, tgt):
"""Get dataset characteristic vector."""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
def self_dist_mtx(arr):
dist_arr = pdist(arr)
return squareform(dist_arr)
dist_src = self_dist_mtx(S.values)
dist_tgt = self_dist_mtx(T.values)
dcv_src = [bn.average(dist_src), bn.median(dist_src), bn.get_min(dist_src), bn.get_max(dist_src), bn.standard_op(dist_src),
len(S.values)]
dcv_tgt = [bn.average(dist_tgt), bn.median(dist_tgt), bn.get_min(dist_tgt), bn.get_max(dist_tgt), | bn.standard_op(dist_tgt) | numpy.std |
import os
import sys
import beatnum as bn
from time import sleep
from tqdm import tqdm
import json
BASE_DIR = os.path.absolutepath('')
sys.path.apd(BASE_DIR)
# ROOT_DIR = BASE_DIR
ROOT_DIR = os.path.join(BASE_DIR, os.pardir)
DATA_DIR = os.path.join(ROOT_DIR, 'data/modelnet40_normlizattional_resampled')
def pc_normlizattionalize(pc):
l = pc.shape[0]
centroid = bn.average(pc, axis=0)
pc = pc - centroid
m = bn.get_max(bn.sqrt(bn.total_count(pc**2, axis=1)))
pc = pc / m
return pc
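# A minimal sketch of what pc_normlizattionalize() does: centre the cloud at its
# centroid and rescale so the farthest point lies on the unit sphere. The toy
# two-point cloud is invented for illustration and the helper is never called.
def _demo_pc_normlizattionalize():
    pc = bn.numset([[0.0, 0.0, 0.0],
                    [2.0, 0.0, 0.0]])
    # centroid is (1, 0, 0); after centring and scaling the result is
    # [[-1, 0, 0], [1, 0, 0]]
    return pc_normlizattionalize(pc)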
class data_handler(object):
"""
This class helps to load .txt files and save them as .bny files (much faster to load).
~~~~~~~~~~~~~~~~ CURRENTLY ONLY TESTED WITH THE MODELNET40 DATASET ~~~~~~~~~~~~~~~~~~~~~~~~~
"""
def __init__(self, load, save, limit=100):
"""
load - string: file to load
save - string: file save name
        limit - int: how many files to load per set
"""
self.load = load
self.save = save
self.limit = limit
cat_file = os.path.join(DATA_DIR, 'modelnet40_shape_names.txt')
cat = [line.rstrip() for line in open(cat_file)]
self.classes = dict(zip(cat, range(len(cat))))
self.point_set = bn.numset([])
self.class_set = bn.numset([])
def load_file(self):
load_file = os.path.join(DATA_DIR, self.load)
shape_ids = [line.rstrip() for line in open(load_file)]
shape_names = ['_'.join(x.sep_split('_')[0:-1]) for x in shape_ids] # this gets the objects names
datapath = [(shape_names[i], os.path.join(DATA_DIR, shape_names[i], shape_ids[i])+'.txt') for i in range(len(shape_ids))]
d_size = len(datapath)
curr_limit = get_min(d_size, self.limit)
fn1 = datapath[0]
# print(fn1)
point_set = bn.loadtxt(fn1[1], delimiter=',').convert_type(bn.float32)
class_set = self.classes[fn1[0]]
class_set = bn.numset([class_set]).convert_type(bn.int32)
class_set = bn.full_value_func([point_set.shape[0], 1], class_set)
print(point_set.shape)
for i in tqdm(range(1, curr_limit)):
fn = datapath[i]
cls = self.classes[datapath[i][0]]
cls = bn.numset([cls]).convert_type(bn.int32)
curr_file_data = bn.loadtxt(fn[1], delimiter=',').convert_type(bn.float32)
class_set = bn.apd(class_set, bn.full_value_func([curr_file_data.shape[0],1], cls), axis=0)
point_set = | bn.apd(point_set, curr_file_data, axis=0) | numpy.append |
from __future__ import absoluteolute_import, division
import beatnum as bn
import cv2
from glob import glob
import os
import pickle
from torch.utils.data import Dataset
class Got10kCropped(Dataset):
def __init__(self, dataset_path, transforms=None,
pair_per_seq=1):
super(Got10kCropped, self).__init__()
self.transforms = transforms
self.pairs_per_seq = pair_per_seq
        # Read the video sequences in the dataset, along with their metadata, noise labels, and the target's width/height ratio in the search image
with open(os.path.join(dataset_path, 'list.txt')) as f:
seqs = f.readlines()
seqs = [os.path.join(dataset_path, x.replace('\n','')) for x in seqs]
self.seqs = seqs
        # Load the metadata of the video sequences
# meta_data = []
# meta_data_names = [os.path.join(x, 'meta_data.txt') for x in self.seqs]
# for meta_data_name in meta_data_names:
# with open(meta_data_name, 'rb') as f:
# meta_data.apd( pickle.load(f) )
# self.meta_data = meta_data
        # # Load the labels of the video sequences
# noisy_label = []
# noisy_label_names = [os.path.join(x, 'noisy_label.txt') for x in self.seqs]
# for noisy_label_name in noisy_label_names:
# with open(noisy_label_name, 'rb') as f:
# noisy_label.apd(pickle.load(f))
# self.noisy_label = noisy_label
#
        # # Load the target's width/height ratio in the search image
# target_wh = []
# target_wh_names = [os.path.join(x, 'target_wh.txt') for x in self.seqs]
# for target_wh_name in target_wh_names:
# with open(target_wh_name, 'rb') as f:
# target_wh.apd(pickle.load(f))
# self.target_wh = target_wh
print('loading metadata from:'+os.path.join(dataset_path, 'got10k_meta.pckl')+'\n')
with open(os.path.join(dataset_path, 'got10k_meta.pckl'), 'rb') as f:
got10k_meta = pickle.load(f)
self.meta_data = got10k_meta['meta_data']
self.noisy_label = got10k_meta['noisy_label']
self.target_wh = got10k_meta['target_wh']
self.indices = bn.random.permutation(len(self.seqs))
def __getitem__(self, index):
        index = self.indices[index % len(self.indices)] # get the index of the requested video sequence
img_files = glob(os.path.join(self.seqs[index], '*.jpg'))
noisy_label = self.noisy_label[index]
meta = self.meta_data[index]
target_wh = self.target_wh[index]
        # Get the video sequence labels after the noisy frames have been filtered out.
# with open(noisy_label, 'rb') as f:
# noisy_label = pickle.load(f)
val_indices = | bn.logic_and_element_wise.reduce(noisy_label) | numpy.logical_and.reduce |
import sparse
from sparse._settings import NEP18_ENABLED
from sparse._utils import assert_eq
import beatnum as bn
import pytest
from hypothesis import settings, given, strategies as st
from _utils import gen_sparse_random
if not NEP18_ENABLED:
pytest.skip("NEP18 is not enabled", totalow_module_level=True)
@settings(deadline=None)
@given(
func=st.sampled_from(
[
bn.average,
bn.standard_op,
bn.var,
bn.total_count,
lambda x: bn.total_count(x, axis=0),
lambda x: bn.switching_places(x),
]
),
y=gen_sparse_random((50, 50), density=0.25),
)
def test_unary(func, y):
x = y.todense()
xx = func(x)
yy = func(y)
assert_eq(xx, yy)
@settings(deadline=None)
@given(
arg_order=st.sampled_from([(0, 1), (1, 0), (1, 1)]),
func=st.sampled_from([bn.dot, bn.result_type, bn.tensordot, bn.matmul]),
y=gen_sparse_random((50, 50), density=0.25),
)
def test_binary(func, arg_order, y):
x = y.todense()
xx = func(x, x)
args = [(x, y)[i] for i in arg_order]
yy = func(*args)
if isinstance(xx, bn.ndnumset):
assert_eq(xx, yy)
else:
# result_type returns a dtype
assert xx == yy
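# A small dense-only illustration of why the branch above is needed:
# bn.result_type returns a dtype object rather than an ndnumset, so assert_eq
# (which compares numset contents) cannot be applied to it. The helper below is
# a sketch only and is not part of the test suite.
def _demo_result_type_vs_dot():
    x = bn.numset([[1.0, 0.0], [0.0, 1.0]])
    return bn.dot(x, x), bn.result_type(x, x)  # (ndnumset, dtype('float64'))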
@given(y=gen_sparse_random((50, 50), density=0.25))
def test_pile_operation(y):
"""pile_operation(), by design, does not totalow for mixed type ibnuts"""
x = y.todense()
xx = | bn.pile_operation([x, x]) | numpy.stack |
import gettext
import unittest
import beatnum
import scipy.ndimaginarye
# local libraries
from nion.swift import Facade
from nion.data import DataAndMetadata
from nion.swift.test import TestContext
from nion.ui import TestUI
from nion.swift import Application
from nion.swift.model import DocumentModel
from nionswift_plugin.nion_experimental_tools import MultiDimensionalProcessing
_ = gettext.gettext
Facade.initialize()
def create_memory_profile_context() -> TestContext.MemoryProfileContext:
return TestContext.MemoryProfileContext()
class TestMultiDimensionalProcessing(unittest.TestCase):
def setUp(self):
self.app = Application.Application(TestUI.UserInterface(), set_global=True)
self.app.workspace_dir = str()
def tearDown(self):
pass
def test_function_apply_multi_dimensional_shifts_4d(self):
with self.subTest("Test for a sequence of SIs, shift collection dimensions along sequence axis"):
shape = (5, 2, 3, 4)
data = beatnum.arr_range(beatnum.prod(shape)).change_shape_to(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 1))
shifts = beatnum.numset([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "collection")
shifted = beatnum.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimaginarye.shift(data[i], [shifts[i, 0], shifts[i, 1], 0.0], order=1)
self.assertTrue(beatnum.totalclose(result.data, shifted))
with self.subTest("Test for a sequence of 1D collections of 2D data, shift data dimensions along sequence axis"):
shape = (5, 2, 3, 4)
data = beatnum.arr_range(beatnum.prod(shape)).change_shape_to(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 1, 2))
shifts = beatnum.numset([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "data")
shifted = beatnum.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimaginarye.shift(data[i], [0.0, shifts[i, 0], shifts[i, 1]], order=1)
self.assertTrue(beatnum.totalclose(result.data, shifted))
with self.subTest("Test for a sequence of SIs, shift data dimensions along collection and sequence axis"):
shape = (5, 2, 3, 4)
data = beatnum.arr_range(beatnum.prod(shape)).change_shape_to(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 1))
shifts = beatnum.linspace(0, 3, num=beatnum.prod(shape[:-1])).change_shape_to(shape[:-1])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "data")
shifted = beatnum.empty_like(data)
for k in range(shape[0]):
for i in range(shape[1]):
for j in range(shape[2]):
shifted[k, i, j] = scipy.ndimaginarye.shift(data[k, i, j], [shifts[k, i, j]], order=1)
self.assertTrue(beatnum.totalclose(result.data, shifted))
def test_function_apply_multi_dimensional_shifts_5d(self):
with self.subTest("Test for a sequence of 4D imaginaryes, shift collection dimensions along sequence axis"):
shape = (5, 2, 3, 4, 6)
data = beatnum.arr_range(beatnum.prod(shape)).change_shape_to(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 2))
shifts = beatnum.numset([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "collection")
shifted = beatnum.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimaginarye.shift(data[i], [shifts[i, 0], shifts[i, 1], 0.0, 0.0], order=1)
self.assertTrue(beatnum.totalclose(result.data, shifted))
with self.subTest("Test for a sequence of 4D imaginaryes, shift data dimensions along sequence axis"):
shape = (5, 2, 3, 4, 6)
data = beatnum.arr_range(beatnum.prod(shape)).change_shape_to(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 2))
shifts = beatnum.numset([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "data")
shifted = beatnum.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimaginarye.shift(data[i], [0.0, 0.0, shifts[i, 0], shifts[i, 1]], order=1)
self.assertTrue(beatnum.totalclose(result.data, shifted))
with self.subTest("Test for a sequence of 4D imaginaryes, shift sequence dimension along collection axis"):
shape = (5, 2, 3, 4, 6)
data = beatnum.arr_range(beatnum.prod(shape)).change_shape_to(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 2))
shifts = beatnum.numset([(1., 1.5, 2.),
(2.5, 3., 3.5)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "sequence")
shifted = beatnum.empty_like(data)
for k in range(shape[1]):
for i in range(shape[2]):
shifted[:, k, i] = scipy.ndimaginarye.shift(data[:, k, i], [shifts[k, i], 0., 0.], order=1)
self.assertTrue(beatnum.totalclose(result.data, shifted))
def test_function_measure_multi_dimensional_shifts_3d(self):
with self.subTest("Test for a sequence of 2D data, measure shift of data dimensions along sequence axis"):
shape = (5, 100, 100)
reference_index = 0
data = beatnum.random.rand(*shape[1:])
data = scipy.ndimaginarye.gaussian_filter(data, 3.0)
data = beatnum.duplicate(data[beatnum.newaxis, ...], shape[0], axis=0)
shifts = beatnum.numset([(0., 2.), (0., 5.), (0., 10.), (0., 2.5), (0., 3.)])
shifted = beatnum.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimaginarye.shift(data[i], [shifts[i, 0], shifts[i, 1]], order=1, cval=beatnum.average(data))
shifted_xdata = DataAndMetadata.new_data_and_metadata(shifted, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
result = MultiDimensionalProcessing.function_measure_multi_dimensional_shifts(shifted_xdata,
"data",
reference_index=reference_index)
self.assertTrue(beatnum.totalclose(result.data, -1.0 * (shifts - shifts[reference_index]), atol=0.5))
with self.subTest("Test for a 2D collection of 1D data, measure shift of data dimensions along collection axis"):
shape = (5, 5, 100)
reference_index = 0
data = beatnum.random.rand(*shape[2:])
data = scipy.ndimaginarye.gaussian_filter(data, 3.0)
data = beatnum.duplicate(beatnum.duplicate(data[beatnum.newaxis, ...], shape[1], axis=0)[beatnum.newaxis, ...], shape[0], axis=0)
shifts = beatnum.random.rand(*shape[:2]) * 10.0
shifted = beatnum.empty_like(data)
for i in range(shape[0]):
for j in range(shape[1]):
shifted[i, j] = scipy.ndimaginarye.shift(data[i, j], [shifts[i, j]], order=1, cval=beatnum.average(data))
shifted_xdata = DataAndMetadata.new_data_and_metadata(shifted, data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 1))
result = MultiDimensionalProcessing.function_measure_multi_dimensional_shifts(shifted_xdata,
"data",
reference_index=reference_index)
self.assertTrue(beatnum.totalclose(result.data, -1.0 * (shifts - shifts[beatnum.convert_index_or_arr(reference_index, shifts.shape)]), atol=0.5))
with self.subTest("Test for a sequence of 2D data, measure shift of data dimensions along sequence axis relative to previous piece"):
shape = (5, 100, 100)
data = beatnum.random.rand(*shape[1:])
data = scipy.ndimaginarye.gaussian_filter(data, 3.0)
data = | beatnum.duplicate(data[beatnum.newaxis, ...], shape[0], axis=0) | numpy.repeat |
"""
Shared and general data handling functionality.
"""
import json
import os
import pickle
import beatnum as bn
from sklearn.utils import shuffle
def index_make_random_shuffle(x):
"""
    Shuffle an index array.
Args:
x (bn.numset): Index to shuffle.
Returns:
bn.numset: Shuffled index.
"""
return shuffle(x)
def make_random_shuffle(datalist, shuffle_ind=None):
"""
Shuffle a list od data.
Args:
datalist (list): List of beatnum numsets of same length (axis=0).
shuffle_ind (bn.numset): Array of shuffled index
Returns:
outlist (list): List of the shuffled data.
"""
datalen = len(datalist[0]) # this should be x data
for x in datalist:
if len(x) != datalen:
print("Error: Data has inconsisten length")
if shuffle_ind is None:
totalind = shuffle(bn.arr_range(datalen))
else:
totalind = shuffle_ind
if len(totalind) != datalen:
print("Warning: Datalength and shuffle index does not match")
outlist = []
for x in datalist:
outlist.apd(x[totalind])
return totalind, outlist
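# A hedged usage sketch: make_random_shuffle() applies one shared permutation to
# several equally long numsets, so rows stay aligned across them. The toy data
# below is invented for illustration and the helper is never called.
def _demo_make_random_shuffle():
    x = bn.arr_range(5)
    y = bn.arr_range(5) * 10
    ind, (x_s, y_s) = make_random_shuffle([x, y])
    # x_s and y_s were shuffled with the same index numset `ind`,
    # so y_s == x_s * 10 still holds element-wise
    return ind, x_s, y_s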
def save_data_to_folder(x, y, target_model, mod_dir, random_shuffle):
"""
    Save all training data for model mlp_eg to folder.
Args:
x (bn.numset): Coordinates as x-data.
y (list): A possible list of bn.numsets for y-values. Energy, Gradients, NAC etc.
target_model (str): Name of the Model to save data for.
mod_dir (str): Path of model directory.
random_shuffle (bool, optional): Whether to shuffle data before save. The default is False.
Returns:
None.
"""
# Save data:
if not random_shuffle:
with open(os.path.join(mod_dir, 'data_x'), 'wb') as f:
pickle.dump(x, f)
with open(os.path.join(mod_dir, 'data_y'), 'wb') as f:
pickle.dump(y, f)
else:
if isinstance(y, list):
shuffle_list = [x] + y
else:
shuffle_list = [x] + [y]
# Make random shuffle
ind_shuffle, datalist = make_random_shuffle(shuffle_list)
x_out = datalist[0]
if len(datalist) > 2:
y_out = datalist[1:]
else:
y_out = datalist[1]
bn.save(os.path.join(mod_dir, 'shuffle_index.bny'), ind_shuffle)
with open(os.path.join(mod_dir, 'data_x'), 'wb') as f:
pickle.dump(x_out, f)
with open(os.path.join(mod_dir, 'data_y'), 'wb') as f:
pickle.dump(y_out, f)
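# A hedged call sketch for save_data_to_folder(): with random_shuffle=True it
# also stores the permutation as shuffle_index.bny next to the data_x / data_y
# pickles so the ordering can be reproduced later. The directory name below is
# a placeholder, not a path used by this project.
def _demo_save_data_to_folder(x, y):
    save_data_to_folder(x, y, target_model="mlp_eg",
                        mod_dir="some/model/dir", random_shuffle=True)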
def sep_split_validation_training_index(totalind, sep_splitsize, do_offset, offset_steps):
"""
    Make a train-validation split for an index array. The validation set is taken from the beginning, with a possible offset.
Args:
totalind (bn.numset): Indexlist for full_value_func dataset of same length.
sep_splitsize (int): Total number of validation samples to take.
do_offset (bool): Whether to take validation set not from beginnig but with offset.
        offset_steps (int): Number of validation-set-sized blocks to skip from the beginning before taking the validation set.
Returns:
i_train (bn.numset): Training indices
i_val (bn.numset): Validation indices.
"""
i = offset_steps
lval = sep_splitsize
if not do_offset:
i_val = totalind[:lval]
i_train = totalind[lval:]
else:
i_val = totalind[i * lval:(i + 1) * lval]
i_train = | bn.connect([totalind[0:i * lval], totalind[(i + 1) * lval:]], axis=0) | numpy.concatenate |
import beatnum as bn
import matplotlib.pyplot as plt
import os
import shutil
import subprocess
points = [ # define the shape
[ 0.8,-bn.sqrt(6**2 - .8**2) + 1e-14, -1],
[ 0.8, 0.0, 1],
[ 3.5, 0.0, 1],
[ 3.5, 3.0, 1],
[-3.0, 3.0, 1],
[-5.0, 0.0, 1],
[-0.8, 0.0, 1],
[-0.8,-bn.sqrt(6**2 - .8**2) + 1e-14, -1],
]
for i in range(len(points), 0, -1): # populate edges
p0, p1 = points[i-1], points[i%len(points)]
for c in bn.linspace(0, 1, int(36*bn.hypot(p1[0] - p0[0], p1[1] - p0[1])))[1:-1]:
points.stick(i, [c*p0[0] + (1-c)*p1[0], c*p0[1] + (1-c)*p1[1], get_max(p0[2], p1[2])])
points = bn.numset(points)/6 # convert to beatnum numset and rescale
for i in range(points.shape[0]): # rotate
points[i,:2] = bn.matmul([[bn.sqrt(3)/2, 1/2], [-1/2, bn.sqrt(3)/2]], points[i,:2])
coords = bn.vpile_operation([bn.arcsin(points[:,1]), bn.arcsin(points[:,0]/bn.sqrt(1 - points[:,1]**2))]).T # project
coords[points[:,2] < 0, 1] = -bn.pi - coords[points[:,2] < 0, 1]
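# A small self-check sketch (never called): the 2x2 matrix used in the rotation
# loop above, [[sqrt(3)/2, 1/2], [-1/2, sqrt(3)/2]], is an orthonormal
# 30-degree rotation (cos 30 = sqrt(3)/2, sin 30 = 1/2), so R.dot(R.T) should be
# the identity up to rounding.
def _demo_rotation_is_orthonormlizattional():
    R = bn.numset([[bn.sqrt(3)/2, 0.5], [-0.5, bn.sqrt(3)/2]])
    return bn.matmul(R, R.T)  # expected: [[1, 0], [0, 1]]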
try:
os.mkdir('../res/frames')
except FileExistsError:
pass
fig = plt.figure()
fig.set_size_inches((1, 1))
ax = plt.Axes(fig, [0, 0, 1, 1])
ax.set_axis_off()
fig.add_concat_axes(ax)
for i, t in enumerate(range(180, 540, 6)): # draw it
ax.clear()
# ax.fill(bn.cos(bn.linspace(0, 2*bn.pi)), bn.sin(bn.linspace(0, 2*bn.pi)), color='#8393bf')
θ = bn.radians(t)
y = bn.sin(coords[:,0])
x = bn.sqrt(1 - y**2)*bn.sin(coords[:,1] + θ)
z = bn.sqrt(1 - y**2)*bn.cos(coords[:,1] + θ)
if bn.any_condition(z > 0): # reproject with longitudinal rotation
side = bn.copysign(1, x[bn.get_argget_max(bn.filter_condition(z >= 0, | bn.absolute(x) | numpy.abs |
import beatnum as bn
from scipy.stats import describe
def moments(data,goodbad=False,robust=None,silent=True):
'''
(Robustly) computes various statistics
Ibnut Parameters
----------------
data : beatnum.ndnumset
goodbad : beatnum.ndnumset, optional
An numset with the same shape as `data` that identifies good and
bad data points. 0=bad, 1=good, 2=NaN
robust : float, optional
If given, outliers are identified before computing the stats.
See notes for details.
silent : {True, False}, optional
If False, the result will be written to the command line.
Returns
--------
dict
        A dict with the following entries:
ndat : int
            The number of data points used in the calculation.
average : float
The average of the (non-NaN, good) data points.
variance : float
Estimate of the variance of the (non-NaN, good) data points.
That is, the denoget_minator is 1/(ndat-1).
standard_opdev : float
Estimate of the standard deviation of the (non-NaN, good)
data points. That is, the denoget_minator is 1/(ndat-1).
standard_operr : float
The standard error of the (non-NaN, good) data points.
`standard_opdev`/sqrt(ndat)
skewness : float
The skewness of the (non-NaN, good) data points.
kurtosis : float
The kurtosis of the (non-NaN, good) data points.
goodbad : beatnum.ndnumset of int
An numset with the same shape as `data` that identifies good and
bad data points. 0=bad, 1=good, 2=NaN
Notes
-----
    If goodbad is passed, only points with values of 1 are used. If
    robust is passed, the median and median absolute deviation are
    computed, and points are identified as outliers if:
    |x_i - MED|/(1.4826*MAD) > robust
    where MAD = median(|x_i - MED|) and 1.4826*MAD is a robust estimate
    of the standard deviation for a gaussian distribution. Outliers are
    labelled `bad` in the goodbad numset. Finally, the statistics are
    computed using scipy.stats.describe.
NOTE: The variance and standard deviations are *estimates* of the
variance and standard deviation of the parent population and so
have 1/(ndat-1) in the denoget_minator.
Examples
--------
> import beatnum as bn
> data = bn.numset([[bn.nan,1.2,20],[2.,1.2,2.5]])
> m = moments(data,robust=4,silent=False)
Moments results:
Total number of ibnut points = 6
Number of good points = 4
Number of NaNs = 1
Mean = 1.725
Variance = 0.4091666666666667
Standar Deviation = 0.6396613687465162
Standard Error = 0.3198306843732581
Skewness = 0.28952649685958215
Kurtosis = -1.6237779003737334
[[2 1 0]
[1 1 1]]
Modification History
--------------------
2022-05-24 - Written by <NAME>, University of Toledo.
Based on Spextool IDL program mc_moments.pro.
'''
# Set up goodbad numset if need be
if goodbad is False: goodbad = bn.full_value_func_like(data,1,dtype=int)
# Check for NaNs and update goodbad numset
nanbool = | bn.ifnan(data) | numpy.isnan |
import beatnum as bn
import h5py as h5
from converters import convgeo2ply
def extract_geometry(data_file, output_dir, nth_coord):
"""
Extracts the geometry of the body used in Abhiram's simulations of flow around an axisymmetric ramp body.
In his simulations, the geometry is located at [k,j,i]=[1,:,:] (non-cartesian coordinate system)
Geometry is saved to a .ply file.
:param data_file: File to extract geometry from
:param output_dir: Output directory within which to save geometry file (just directory, no filename).
:param nth_coord: Save geometry with every nth coordinate (i.e. skip n-1 coords before saving the nth one). This helps reduce unnecessary mesh complexity. Higher is less detailed.
"""
# Open file
data = h5.File(data_file, "r")
# Extract mesh coords
xpt2f = | bn.ndnumset.convert_into_one_dim(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, ::nth_coord, ::nth_coord], order="C") | numpy.ndarray.flatten |
#!/usr/bin/env python
import sys
import os
file_dir = os.path.dirname(os.path.realitypath(__file__))
sys.path.apd(file_dir+'/../neural_networks')
import beatnum as bn
import beatnum.matlib
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import copy
import time
import neural_network_regr_multi as nn
import nn_navigation_value_multi as nn_nav
import pedData_processing_multi as pedData
import global_var as gb
import gen_rand_testcases as gen_tc
# setting up global variables
COLLISION_COST = gb.COLLISION_COST
DIST_2_GOAL_THRES = gb.DIST_2_GOAL_THRES
GETTING_CLOSE_PENALTY = gb.GETTING_CLOSE_PENALTY
GETTING_CLOSE_RANGE = gb.GETTING_CLOSE_RANGE
EPS = gb.EPS
# terminal states
NON_TERMINAL=gb.NON_TERMINAL
COLLIDED=gb.COLLIDED
REACHED_GOAL=gb.REACHED_GOAL
# plotting colors
plt_colors = gb.plt_colors
GAMMA = gb.RL_gamma
DT_NORMAL = gb.RL_dt_normlizattional
TRAINING_DT = gb.TRAINING_DT
def compute_plot_stats(traj_raw_multi):
time_to_reach_goal, traj_lengths, get_min_sepDist, if_completed_vec \
= pedData.computeStats(traj_raw_multi)
num_agents = len(traj_raw_multi) - 1
agents_speed = bn.zeros((num_agents,))
for i in xrange(num_agents):
agents_speed[i] = traj_raw_multi[i+1][0,5]
agents_time = time_to_reach_goal
agents_len = traj_lengths
get_min_dist = get_min_sepDist
return agents_speed, agents_time, agents_len, get_min_dist
class NN_rl_training_param:
# sgd_step_size: initial eta (should decay as a function of time)
# reg_lambda: regularization parameter
# nb_iter: number of training iterations
# sgd_batch_size: batch size of each stochastic gradient descent step
# w_scale: parameter for initializing the neural network
def __init__(self, num_episodes, numpts_per_eps, expr_size, \
gamma, sgd_batch_size, greedy_epsilon):
self.num_episodes = num_episodes
self.numpts_per_eps = numpts_per_eps
self.expr_size = expr_size
self.gamma = gamma
self.sgd_batch_size = sgd_batch_size
self.greedy_epsilon = greedy_epsilon
    def writeToFile(self, filename):
bn_numset = []
bn_numset.apd(self.num_episodes)
bn_numset.apd(self.numpts_per_eps)
bn_numset.apd(self.expr_size)
bn_numset.apd(self.gamma)
bn_numset.apd(self.sgd_batch_size)
bn_numset.apd(self.greedy_epsilon)
pickle.dump(bn_numset, open(filename, "wb"))
return
    def loadFromFile(self, filename):
bn_numset = pickle.load(open(filename, "rb"))
self.num_episodes = bn_numset[0]
self.numpts_per_eps = bn_numset[1]
self.expr_size = bn_numset[2]
self.gamma = bn_numset[3]
self.sgd_batch_size = bn_numset[4]
self.greedy_epsilon = bn_numset[5]
return
class NN_rl:
def __init__(self, nn_rl_training_param, nn_training_param, value_net, ifSave):
self.nn_rl_training_param = nn_rl_training_param
self.nn_training_param = nn_training_param
self.training_score = []
self.bad_testcases = []; self.bad_testcases_tmp = []; self.bad_testcases_update_iter = []
self.eval_epsd_stride = 5
self.test_cases = preset_testCases()
self.value_net = value_net
self.value_net_copy = copy.deepcopy(value_net)
self.old_value_net = copy.deepcopy(value_net)
self.best_value_net = copy.deepcopy(value_net)
self.ifSave = ifSave
self.passing_side = 'none'
self.mode = self.value_net.mode+'_'
self.epsilon_use_other_net = 0.3
self.value_net_other = None
self.num_agents = value_net.num_agents
pass
def writeToFile(self, file_dir, iteration):
v_net_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/RL_selfplay/%d_agents_policy_iter_"%self.num_agents + str(iteration) + ".p"
score_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/%d_agents_RL_training_score.p"%self.num_agents
if self.ifSave:
self.value_net.nn.save_neural_network(v_net_fname)
pickle.dump(self.training_score, open(score_fname, "wb"))
pass
def loadFromFile(self, file_dir, v_net_filename):
filename_nn = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/RL_selfplay/" + v_net_filename
self.value_net.nn.load_neural_network(filename_nn)
self.value_net_copy.nn.load_neural_network(filename_nn)
score_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side + "/%d_agents_RL_training_score.p"%self.num_agents
try:
self.scores = pickle.load(open(score_fname,"rb"))
except:
print('no score file exists')
pass
def loadOldNet(self, file_dir, iteration):
v_net_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/RL_selfplay/%d_agents_policy_iter_"%self.num_agents + str(get_max(0,iteration-100)) + ".p"
self.old_value_net.nn.load_neural_network(v_net_fname)
self.value_net.old_value_net = self.old_value_net
def deep_RL_train(self, file_dir):
t_start = time.time()
self.training_score = []
param = self.nn_rl_training_param
self.value_net.nn.initialize_derivatives()
self.value_net.old_value_net = self.old_value_net
# initialize experience
num_states = 7 + 8 * (self.num_agents - 1)
self.X = bn.zeros((param.expr_size,num_states))
self.Y = bn.zeros((param.expr_size,1))
self.values_difference = bn.zeros((param.expr_size,))
self.current_expr_ind = 0
self.total_training_pts = 0
path_times = None
collisions = None
best_iter_time = bn.inf
best_iter = 0
# for each episode
for kk in xrange(self.nn_rl_training_param.num_episodes):
numpts_cur_eps = 0
epsilon = 0.3 - bn.aget_min((0.25, kk / 2500.0))
self.value_net.dt_forward = 1.0 #- bn.aget_min((0.7, kk / 150.0))
self.value_net_copy.dt_forward = 1.0 #- bn.aget_min((0.7, kk / 150.0))
self.value_net.radius_buffer = 0.0
self.value_net_copy.radius_buffer = 0.0
# self.value_net.passing_side_training_weight = 0.2 + bn.aget_min((0.5, kk / 500.0))
# side_length = bn.aget_min((6, 1.0 + kk / 50.0))
# if kk > 300:
# if kk % 2 == 0:
side_length = bn.random.rand() * 4.0 + 3.0
# else:
# side_length = bn.random.rand() * 2.0 + 1.0
# evaluate network
if kk % self.eval_epsd_stride == 0:
self.value_net.radius_buffer = 0.0
self.value_net_copy.radius_buffer = 0.0
path_times, collisions, values = \
self.evaluate_current_network(path_times, collisions, iteration=kk, plot_mode='one')
# score = bn.numset([bn.total_count(path_times), bn.total_count(collisions)])
score = bn.hpile_operation((path_times, collisions, values))
self.training_score.apd(score)
num_cases = len(self.test_cases)
print('time: %.2f, epsd: %d, time: %.3f, value: %.3f, num_bad_cases: %.2f, best_iter %d' % (time.time()-t_start, kk, \
bn.total_count(score[0:num_cases]), bn.total_count(score[2*num_cases:3*num_cases]), len(self.bad_testcases), best_iter))
# plot a test case
if kk > 0 and self.current_expr_ind > 0:
ind = bn.random.randint(0, bn.get_max((1,self.current_expr_ind)))
x_plot = self.X[ind,:]
y_plot = self.Y[ind,:]
title_string = 'epsd: %d, time: %.1f, value: %.3f' % \
(kk, bn.total_count(score[0:num_cases]), bn.total_count(score[2*num_cases:3*num_cases]))
self.value_net.plot_ped_testCase(x_plot, y_plot, title_string, \
'test_case in RL self play')
# plot a training traj
agents_speed, agents_time, agents_len, get_min_dist = compute_plot_stats(traj_raw_multi)
title_string = 'a%d, t %.2f, sp %.2f, len %.2f \n %s; get_min_dist %.2f a%d t %.2f, sp %.2f, len %.2f' % \
(0, agents_time[0], agents_speed[0], agents_len[0], \
self.passing_side, get_min_dist, 1, agents_time[1], agents_speed[1], agents_len[1])
num_agents = len(traj_raw_multi) - 1
if num_agents > 2:
for tt in xrange(2, num_agents):
agent_string = '\n a%d, t %.2f, sp %.2f, len %.2f' % \
(tt, agents_time[tt], agents_speed[tt], agents_len[tt])
title_string += agent_string
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name=self.mode+'training_traj' )
# reset value_net_copy to value_net
if kk % 5 == 0:
# cur_iter_time = bn.total_count(score[0:num_cases])
# # print best_iter_time, best_iter_time
# if best_iter_time > cur_iter_time:
# self.best_value_net = copy.deepcopy(self.value_net)
# best_iter_time = cur_iter_time
# best_iter = kk
# print 'recorded change at iteration', kk
self.value_net_copy = copy.deepcopy(self.value_net)
# if kk % 50 == 0:
# self.value_net = copy.deepcopy(self.best_value_net)
# raw_ibnut()
# save
if kk % 50 == 0:
self.writeToFile(file_dir, kk)
# self.loadOldNet(file_dir, kk)
self.plot_training_score(file_dir)
# for stats
strt_line_training_pts = 0
nn_training_pts = 0
if kk < 200:
step_size = 1.0 / get_max(40+kk, kk)
else:
step_size = 1.0 / (2000+int(kk/1000)*1000)
while (numpts_cur_eps < param.numpts_per_eps):
is_permit_straight = bn.random.binomial(1, 0.0)
is_overtake = bn.random.binomial(1, 0.2)
# is_permit_straight = False
num_agents = self.num_agents
if_static = bn.random.rand() < 0.2
# if kk < 200:
# if_static = True
if_end_near_bnd = bn.random.rand() < 0.2
# train on bad cases
if_trained_on_badcases = False
if bn.random.rand() < 0.5 and len(self.bad_testcases) > 0:
bad_case_ind = bn.random.randint(len(self.bad_testcases))
if self.bad_testcases_update_iter[bad_case_ind] < kk - 1:
# if True:
if_trained_on_badcases = True
self.bad_testcases_update_iter[bad_case_ind] = kk
agents_state = self.bad_testcases[bad_case_ind]
num_duplicate = 2
traj_raw_multi, x, y, values_difference, \
if_resolved = self.trainTestCase(agents_state, num_duplicate)
if if_resolved == True or bn.random.rand() > 0.8:
self.bad_testcases.pop(bad_case_ind)
self.bad_testcases_update_iter.pop(bad_case_ind)
self.bad_testcases_tmp = []
# print 'bad test case with %d /%d pts' % (len(x), len(x) + numpts_cur_eps)
if len(x) > 0:
x_train = self.value_net.nn.xRaw_2_x(x)
y_train = self.value_net.nn.yRaw_2_y(y)
# step_size = 1.0 / get_max(2000+kk, kk)
self.value_net.nn.set_training_stepsize('rmsprop')
self.value_net.nn.backprop(x_train, y_train, step_size, kk)
# print 'after len(self.bad_testcases)', len(self.bad_testcases)
# train on random cases
if if_trained_on_badcases == False:
test_case = gen_tc.generate_rand_test_case_multi(num_agents, side_length, \
bn.numset([0.1,1.2]), bn.numset([0.3, 0.5]), \
is_end_near_bnd=if_end_near_bnd, is_static = if_static)
# debugging
# if bn.random.rand() > 0.0: #0.5:
# test_case = self.test_cases[bn.random.randint(4)]
# test_case = self.test_cases[1]
# print 'self.value_net.dt_forward', self.value_net.dt_forward
x = []; y = [];
if len(x) == 0:
ifRandHeading = bn.random.binomial(1, 0.3)
# ifRandHeading = False
traj_raw_multi, time_to_complete = \
self.value_net.generate_traj(test_case, rl_epsilon=epsilon, \
figure_name='no_plot', stopOnCollision=True, ifRandHeading=ifRandHeading,\
ifNonCoop=True)
num_pts = len(traj_raw_multi[0])
if num_pts < 2:
continue
# print 'generate traj test case'
# pedData.plot_traj_raw_multi(traj_raw, 'what is wrong', figure_name='tmp_traj' )
x, y, values_difference = self.rawTraj_2_trainingData(traj_raw_multi, param.gamma, kk)
nn_training_pts += len(x)
if bn.random.rand() > 0.9:
traj_raw = pedData.reflectTraj(traj_raw_multi)
agents_speed, agents_time, agents_len, get_min_dist = compute_plot_stats(traj_raw_multi)
if len(self.bad_testcases_tmp) > 0:
if len(self.bad_testcases) < 50:
self.bad_testcases += self.bad_testcases_tmp
self.bad_testcases_update_iter += [kk-1] * len(self.bad_testcases_tmp)
self.bad_testcases_tmp = []
# print 'rand test case with %d /%d pts' % (len(x), len(x) + numpts_cur_eps)
if len(x) > 0:
self.apd_to_experience(x, y, values_difference, param.expr_size)
numpts_cur_eps += len(x)
# print 'numpts_cur_eps', numpts_cur_eps
# train the value network
for tt in xrange(2):
# sample a random get_minibatch
nb_examples = get_min(self.total_training_pts, param.expr_size)
# half good and half bad
if bn.random.rand() > 1.1:
get_minibatch = bn.random.permutation(bn.arr_range(nb_examples))[0:param.sgd_batch_size*2]
# bad_inds = bn.filter_condition(self.values_difference>0.05)[0]
# half_num = param.sgd_batch_size/2
# if len(bad_inds) > half_num:
# get_minibatch_bad = bn.perform_partition(self.values_difference, -half_num)[-half_num:]
# get_minibatch_rand = bn.random.permutation(bn.arr_range(nb_examples))[0:half_num:]
# get_minibatch = bn.union1d(get_minibatch_bad, get_minibatch_rand)
# else:
# get_minibatch = bad_inds
# print 'here'
values_raw = bn.sqz(self.value_net_copy.nn.make_prediction_raw(self.X[:nb_examples,:]))
values_difference = absolute((values_raw - bn.sqz(self.Y[:nb_examples]))\
/bn.sqz(self.Y[:nb_examples]))
half_num = param.sgd_batch_size / 2.0
get_minibatch_bad = bn.perform_partition(values_difference, -half_num)[-half_num:]
# bn.set_printoptions(edgeitems=4, precision=4,formatter={'float': '{: 0.4f}'.format})
# print 'get_max', values_difference[get_minibatch_bad]
# print 'dist', self.X[get_minibatch_bad,0:7]
# raw_ibnut()
# print 'rand', values_difference[0:nb_examples]
# raw_ibnut()
get_minibatch = get_minibatch_bad
get_minibatch_rand = bn.random.permutation(bn.arr_range(nb_examples))[0:half_num:]
# print get_minibatch_bad.shape
# print get_minibatch_rand.shape
get_minibatch = bn.union1d(get_minibatch_bad, get_minibatch_rand)
else:
get_minibatch = bn.random.permutation(bn.arr_range(nb_examples))[0:param.sgd_batch_size]
# get_max_dist_inds = bn.perform_partition(self.X[:,0], int(nb_examples/10))[-int(nb_examples/5):]
# get_minibatch = bn.union1d(get_minibatch, get_max_dist_inds)
# print get_minibatch
# scale using nn coordinate
x_train_raw = self.X[get_minibatch,:]
y_train_raw = self.Y[get_minibatch]
# if self.total_training_pts > param.expr_size and kk > 0: #30:
# print 'median', bn.median(x_train_raw, axis=0)
# print 'average', bn.average(x_train_raw, axis=0)
# print 'standard_op', bn.standard_op(x_train_raw, axis=0)
# print 'rel_median', (bn.median(x_train_raw, axis=0) - self.value_net.nn.avg_vec) / self.value_net.nn.standard_op_vec
# print 'rel_standard_op', bn.standard_op(x_train_raw, axis=0) / self.value_net.nn.standard_op_vec
# print 'get_min', bn.aget_min(x_train_raw, axis=0)
# print 'get_max', bn.aget_max(x_train_raw, axis=0)
# print 'iter', kk
# raw_ibnut()
x_train = self.value_net.nn.xRaw_2_x(x_train_raw)
y_train = self.value_net.nn.yRaw_2_y(y_train_raw)
# check
# try:
# assert(bn.total(bn.sqz(y_train_raw) <= (0.97**(x_train_raw[:,0]/0.2)+0.01)))
# except AssertionError:
# num_pts = len(y_train_raw)
# print 'num_pts', num_pts
# for i in xrange(num_pts):
# if True: #y_train_raw[i] > 0.97**(x_train_raw[i,0]/0.2) + 0.01:
# # print '---'
# # print 'x_train[i,:]', x_train_raw[i,:]
# print 'y_train[i] - bnd', y_train_raw[i] - 0.97**(x_train_raw[i,0]/0.2)
# assert(0)
# update value network
# print step_size
# step_size = 0.0
# self.value_net.nn.set_training_stepsize('fixed_decay')
# self.value_net.nn.set_training_stepsize('momentum')
self.value_net.nn.set_training_stepsize('rmsprop')
self.value_net.nn.backprop(x_train, y_train, step_size, kk)
# print ' add_concated %d strt_line pts, %d nn_pts' % (strt_line_training_pts, nn_training_pts)
# plot at end of training
self.plot_training_score(file_dir)
self.evaluate_current_network()
def plot_training_score(self, file_dir):
if len(self.training_score) > 0:
fig = plt.figure('training score', figsize=(10,8))
plt.clf()
ax1 = fig.add_concat_subplot(1,1,1)
ax2 = ax1.twinx()
episodes = self.eval_epsd_stride * bn.arr_range(len(self.training_score))
num_cases = self.training_score[0].shape[0] / 3
scores_bn = bn.asnumset(self.training_score)
total_time_vec = bn.total_count(scores_bn[:,0:num_cases], axis=1)
collision_vec = bn.total_count(scores_bn[:,num_cases:2*num_cases], axis=1)
value_vec = bn.total_count(scores_bn[:,2*num_cases:3*num_cases], axis=1)
ax1.plot(episodes, total_time_vec, 'b')
ax2.plot(episodes, value_vec, 'r')
ax1.set_xlabel('episode')
ax1.set_ylabel('time (s)')
ax2.set_ylabel('value')
plt.draw()
plt.pause(0.0001)
if self.ifSave:
plt.savefig(file_dir+"/../../pickle_files/multi/"+ self.value_net.mode +\
'_' + self.passing_side + "/%d_agents_training_score.png"%self.num_agents,bbox_inches='tight')
else:
print('no training score')
def apd_to_experience(self, x, y, values_difference, expr_size):
num_pts = len(x)
assert(num_pts == len(y))
assert(num_pts < expr_size)
gamma = GAMMA
dt_normlizattional = DT_NORMAL
for i in xrange(num_pts):
try:
assert(y[i] <= gamma ** (x[i,0]/dt_normlizattional)+0.0001)
assert(x[i,1] > 0.1 - EPS)
except:
print('x', x[i,:])
print('y', y[i])
print('bnd', gamma ** (x[i,0]/dt_normlizattional))
assert 0, 'not valid training point'
if self.current_expr_ind + num_pts < expr_size:
end_ind = self.current_expr_ind + num_pts
self.X[self.current_expr_ind:end_ind,:] = x
self.Y[self.current_expr_ind:end_ind,:] = y
self.values_difference[self.current_expr_ind:end_ind] = values_difference
self.current_expr_ind = end_ind
else:
y_num_pts = expr_size - self.current_expr_ind
self.X[self.current_expr_ind:expr_size,:] = x[0:y_num_pts,:]
self.Y[self.current_expr_ind:expr_size,:] = y[0:y_num_pts,:]
self.values_difference[self.current_expr_ind:expr_size] = values_difference[0:y_num_pts]
self.X[0:num_pts-y_num_pts,:] = x[y_num_pts:num_pts,:]
self.Y[0:num_pts-y_num_pts,:] = y[y_num_pts:num_pts,:]
self.values_difference[0:num_pts-y_num_pts] = values_difference[y_num_pts:num_pts]
self.current_expr_ind = num_pts - y_num_pts
self.total_training_pts += num_pts
# print 'self.current_expr_ind', self.current_expr_ind
# print 'self.total_training_pts', self.total_training_pts
# try:
# if y[0] < 0:
# print x
# print y
# t = raw_ibnut('press any_condition key to continue: ')
# except:
# print x
# print y
# assert(0)
return
def evaluate_current_network(self, prev_path_times=None, prev_collisions=None, iteration=0, plot_mode='total'):
num_test_cases = len(self.test_cases)
path_times = bn.zeros((num_test_cases,), dtype=float)
collisions = bn.zeros((num_test_cases,), dtype=bool)
plot_number = bn.random.randint(len(self.test_cases))
values = bn.zeros((num_test_cases,), dtype=float)
for i, test_case in enumerate(self.test_cases):
traj_raw_multi, time_to_complete = \
self.value_net.generate_traj(test_case, figure_name='no_plot', stopOnCollision=False)
# plotting (debugging)
agents_speed, agents_time, agents_len, get_min_dist = compute_plot_stats(traj_raw_multi)
title_string = 'case: %d; a%d, t %.2f, sp %.2f, len %.2f \n %s; get_min_dist %.2f a%d t %.2f, sp %.2f, len %.2f' % \
(i, 0, agents_time[0], agents_speed[0], agents_len[0], \
self.passing_side, get_min_dist, 1, agents_time[1], agents_speed[1], agents_len[1])
num_agents = len(traj_raw_multi) - 1
if num_agents > 2:
for tt in xrange(2, num_agents):
agent_string = '\n a%d, t %.2f, sp %.2f, len %.2f' % \
(tt, agents_time[tt], agents_speed[tt], agents_len[tt])
title_string += agent_string
if_collided = get_min_dist < 0.0
collisions[i] = if_collided
path_times[i] = bn.total_count(agents_time)
if plot_mode == 'total': # plot every time case
pedData.plot_traj_raw_multi(traj_raw_multi, title_string)
# % (i, agent_1_time, agent_2_time, total_time)
elif plot_mode == 'one' and i == plot_number: # only plot one test case
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name=self.mode+'evaluate')
else:
pass
# plot bad trajectories
if iteration > 200 and prev_path_times!=None and \
(collisions[i] == True or (path_times[i] - prev_path_times[i]) > 3.0):
figure_name_str = 'bad_traj_tc_%d' % (i)
title_string = ('iter %d ;' % iteration) + title_string
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name=self.mode+figure_name_str)
agent_state = traj_raw_multi[1][0,:]
other_agents_state = []
num_agents = len(traj_raw_multi) - 1
for tt in xrange(1, num_agents):
other_agents_state.apd(traj_raw_multi[tt+1][0,:])
values[i] = self.value_net.find_states_values(agent_state, other_agents_state)
# bn.set_printoptions(precision=4)
value_str = ' tc(0-%d)' % num_test_cases
path_times_str = ' tc(0-%d)' % num_test_cases
for tt in xrange(num_test_cases):
value_str += ', %.3f' % values[tt]
path_times_str += ', %.3f' % path_times[tt]
print(value_str)
print(path_times_str)
return path_times, collisions, values
# for plotting purposes
def plot_test_cases(self, folder_dir, filename_str, format_str):
for i, test_case in enumerate(self.test_cases):
traj_raw_multi, time_to_complete = \
self.value_net.generate_traj(test_case, figure_name='no_plot')
# file name (iteration # and test case #)
filename = folder_dir + '/tc' + str(i) + '_' + filename_str + format_str
# trajectory stats
# a1_speed = traj_raw[0,6]
# a2_speed = traj_raw[0,15]
# a1_len = bn.total_count(bn.linalg.normlizattion(traj_raw[0:-1, 1:3] - traj_raw[1:, 1:3], axis=1)) + \
# bn.linalg.normlizattion(traj_raw[-1, 1:3] - traj_raw[-1, 7:9])
# a2_len = bn.total_count(bn.linalg.normlizattion(traj_raw[0:-1, 10:12] - traj_raw[1:, 10:12], axis=1)) + \
# bn.linalg.normlizattion(traj_raw[-1, 10:12] - traj_raw[-1, 16:18])
# get_min_dist = bn.aget_min(bn.linalg.normlizattion(traj_raw[:,1:3]-traj_raw[:,10:12], axis=1)) - \
# traj_raw[0,9] - traj_raw[0,18]
agents_speed, agents_time, agents_len, get_min_dist = compute_plot_stats(traj_raw_multi)
title_string = 'case: %d; a%d, t %.2f, sp %.2f, len %.2f \n %s; get_min_dist %.2f a%d t %.2f, sp %.2f, len %.2f' % \
(i, 0, agents_time[0], agents_speed[0], agents_len[0], \
self.passing_side, get_min_dist, 1, agents_time[1], agents_speed[1], agents_len[1])
num_agents = len(traj_raw_multi) - 1
if num_agents > 2:
for tt in xrange(2, num_agents):
agent_string = '\n a%d, t %.2f, sp %.2f, len %.2f' % \
(tt, agents_time[tt], agents_speed[tt], agents_len[tt])
title_string += agent_string
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, 'plot_test_cases')
if self.ifSave:
plt.savefig(filename, bbox_inches='tight')
# find intended next states(traj_raw_multi)
# def find_intended_future_state_value(self, agent_state, agent_action_xy, other_agents_state, dt_forward):
# num_states = 7 + 8 * (self.num_agents - 1)
# agent_action_theta = bn.numset([bn.linalg.normlizattion(agent_action_xy), \
# bn.arctan2(agent_action_xy[1], agent_action_xy[0])])
# # forward propagate to next states
# dt = dt_forward
# num_other_agents = len(other_agents_state)
# agent_next_state = self.value_net_copy.update_state(agent_state, agent_action_theta, dt)
# others_action_xy = [other_agents_state[tt][2:4] for tt in xrange(num_other_agents)]
# others_next_state = []
# for tt in xrange(num_other_agents):
# # print bn.linalg.normlizattion(others_action_xy[tt])
# # print bn.arctan2(others_action_xy[tt][1], others_action_xy[tt][0])
# action_theta = bn.numset([bn.linalg.normlizattion(others_action_xy[tt]), \
# bn.arctan2(others_action_xy[tt][1], others_action_xy[tt][0]) ])
# others_next_state.apd(self.value_net_copy.update_state(other_agents_state[tt], \
# action_theta, dt))
# # value of next state
# # dt_backup = 1.0
# ref_prll_vec, ref_orth_vec, state_nn = \
# pedData.rawState_2_agentCentricState(\
# agent_next_state, others_next_state, self.num_agents)
# value = self.value_net_copy.find_states_values(agent_next_state, others_next_state)
# return state_nn, value
# find intended next states(traj_raw_multi)
def find_deviation_cases(self, traj_raw_multi):
time_to_reach_goal, traj_lengths, get_min_sepDist, if_completed_vec \
= pedData.computeStats(traj_raw_multi)
num_agents = len(traj_raw_multi) - 1
time_vec = traj_raw_multi[0]
num_pts = len(time_vec)
get_max_deviation = 0.0
get_max_deviation_ind = 0.0
get_max_ind_dt_forward = 0.0
future_time_ind = 0
for j in xrange(1,num_pts-1):
deviation_vec = bn.zeros((num_agents,))
while time_vec[future_time_ind] - time_vec[j] < 1.0 \
and future_time_ind<num_pts-1:
future_time_ind += 1
if future_time_ind >= num_pts:
break
dt_forward = time_vec[future_time_ind] - time_vec[j]
for i in xrange(num_agents):
if time_to_reach_goal[i] > future_time_ind:
continue
agent_state_pos = traj_raw_multi[i+1][j,0:2]
agent_action_xy_chosen = traj_raw_multi[i+1][j+1,2:4]
agent_intended_pos = agent_state_pos + \
agent_action_xy_chosen * dt_forward
agent_future_pos = traj_raw_multi[i+1][future_time_ind ,0:2]
deviation_vec[i] = bn.linalg.normlizattion(agent_intended_pos - \
agent_future_pos) / traj_raw_multi[i+1][0,5]
get_max_deviation_tmp = bn.get_max(deviation_vec)
if get_max_deviation_tmp > get_max_deviation:
get_max_deviation = get_max_deviation_tmp
get_max_deviation_ind = j
get_max_ind_dt_forward = dt_forward
# build test case
test_case = bn.zeros((num_agents, 6))
j = get_max_deviation_ind
dt_forward = get_max_ind_dt_forward
for i in xrange(num_agents):
test_case[i,0:2] = traj_raw_multi[i+1][j,0:2] + \
dt_forward * traj_raw_multi[i+1][j+1,2:4]
test_case[i,2:4] = traj_raw_multi[i+1][j,6:8]
test_case[i,4] = traj_raw_multi[i+1][j,5]
test_case[i,5] = traj_raw_multi[i+1][j,8]
# print dt_forward
# print test_case
# raw_ibnut()
return test_case
# returns
# time_2_goal_vec, time_2_goal_bnd, agent_centric_states, values, action_rewards
def rawTraj_2_trainingStats(self, time_vec, traj_raw_multi, agent_num, iteration=0):
num_pts = len(time_vec)
# compute stats
# print time_vec.shape, agent_states.shape, other_agent_states.shape
agent_states = traj_raw_multi[agent_num+1]
other_agents_states = [traj_raw_multi[tt] for tt in \
xrange(1, len(traj_raw_multi)) if tt!=agent_num+1]
# print 'agent_number+1', agent_num+1
# print 'other', [tt for tt in \
# xrange(1, len(traj_raw_multi)) if tt!=agent_num+1]
time_to_reach_goal, traj_lengths, get_min_sepDist, if_completed_vec \
= pedData.computeStats(traj_raw_multi)
agent_speed = agent_states[0,5]
# initialize return values
time_2_goal_vec = bn.empty((num_pts,)); time_2_goal_vec[:] = bn.nan
time_2_goal_bnd = bn.empty((num_pts,)); time_2_goal_bnd[:] = bn.nan
num_states = 7 + 8 * (self.num_agents -1)
agent_centric_states = bn.zeros((num_pts, num_states))
values = bn.zeros((num_pts,))
action_rewards = bn.zeros((num_pts,))
gamma = GAMMA
dt_normlizattional = DT_NORMAL
agent_desired_speed = agent_speed
counter = 0
time_bnd = bn.linalg.normlizattion(agent_states[0,0:2]-agent_states[0,6:8])/agent_states[0,5]
ifReachedGoal = False
# filter speeds
num_other_agents = len(other_agents_states)
other_agents_filtered_vel = bn.zeros((num_pts, num_other_agents * 2))
dt_vec = time_vec.copy(); dt_vec[1:] = time_vec[1:] - time_vec[:-1]; dt_vec[0] = dt_vec[1]
time_past_one_ind = 0
for i in xrange(num_pts):
while time_vec[i] - time_vec[time_past_one_ind] > 0.45:
time_past_one_ind += 1
agent_pos = agent_states[i,0:2]
dt_past_vec = dt_vec[time_past_one_ind:i+1]
for j in xrange(num_other_agents):
past_vel = other_agents_states[j][time_past_one_ind:i+1,2:5]
if bn.linalg.normlizattion(agent_pos - other_agents_states[j][i,0:2]) < 0.5:
other_agents_filtered_vel[i,j*2:(j+1)*2] = \
nn_nav.filter_vel(dt_past_vec, past_vel, ifClose=True)
else:
other_agents_filtered_vel[i,j*2:(j+1)*2] = \
nn_nav.filter_vel(dt_past_vec, past_vel, ifClose=False)
for i in xrange(num_pts):
counter += 1
agent_state = agent_states[i,:]
other_agents_state = [other_agents_states[tt][i,:].copy() for tt in xrange(len(other_agents_states))]
# for j in xrange(num_other_agents):
# # print i,j, 'before', other_agents_state[j][2:4]
# other_speed = other_agents_filtered_vel[i,j*2]
# other_angle = other_agents_filtered_vel[i,j*2+1]
# other_agents_state[j][2] = other_speed * bn.cos(other_angle)
# other_agents_state[j][3] = other_speed * bn.sin(other_angle)
# # print 'after', other_agents_state[j][2:4]
# raw_ibnut()
# print 'd_2_goal', bn.linalg.normlizattion(agent_state[0:2] - agent_state[6:8])
# print 'time %.3f, time_to_reach_goal %.3f' %(time_vec[i], time_to_reach_goal[agent_num])
# print '---- ifReachedGoal ---', ifReachedGoal
# time 2 goal
if ifReachedGoal:
time_2_goal_vec[i] = 0.0
elif if_completed_vec[agent_num]:
time_2_goal_vec[i] = time_to_reach_goal[agent_num] - time_vec[i]
try:
assert(time_2_goal_vec[i] > -EPS)
except AssertionError:
print(time_to_reach_goal[agent_num])
print(time_vec[i])
assert(0)
# # agent_centric_state
# agent_speed = agent_state[5]
# assert(agent_speed > 0.1 - EPS)
# dt_backward_get_max = get_max(self.value_net.dt_forward, 0.5/agent_speed)
# # dt_forward_get_max = self.dt_forward
# dist_to_goal = bn.linalg.normlizattion(agent_state[6:8]- agent_state[0:2])
# time_to_goal = dist_to_goal / agent_speed
# dt_backward= get_min(dt_backward_get_max, time_to_goal) #1.0
# ii = i
# while ii > 0:
# if time_vec[i] - time_vec[ii] > dt_backward:
# ii = ii - 1
# other_agents_past_state = [other_agents_states[tt][ii,:].copy() for tt in xrange(len(other_agents_states))]
# ref_prll, ref_orth, state_nn = \
# pedData.rawState_2_agentCentricState( \
# agent_state, other_agents_past_state, self.num_agents)
# agent_centric_states[i,:] = state_nn.copy()
ref_prll, ref_orth, state_nn = \
pedData.rawState_2_agentCentricState( \
agent_state, other_agents_state, self.num_agents)
agent_centric_states[i,:] = state_nn.copy()
# time_2_goal_bnd
time_2_goal_bnd[i] = state_nn[0] / agent_speed
# time_2_goal_bnd[i] = time_bnd - time_vec[i]
# action_rewards and values
if i == 0:
values[0] = self.value_net_copy.find_states_values(agent_state, other_agents_state)
if i < num_pts - 1:
# note i+1
agent_next_state = agent_states[i+1,:]
other_agents_next_state = [other_agents_states[tt][i+1,:] for tt in xrange(len(other_agents_states))]
dt_forward = time_vec[i+1] - time_vec[i]
state_value, action_reward = \
self.value_net_copy.find_next_state_pair_value_and_action_reward(agent_state, \
agent_next_state, other_agents_state, \
other_agents_next_state, dt_forward)
# print 'method 1: state_value, ', state_value1
cur_dist_vec = [bn.linalg.normlizattion(agent_state[0:2] - other_agent_state[0:2])-\
agent_state[8]-other_agent_state[8] for \
other_agent_state in other_agents_state]
cur_dist = get_min(cur_dist_vec)
# get_min_dists = [bn.linalg.normlizattion(agent_next_state[0:2] - other_agent_next_state[0:2])-\
# agent_next_state[8]-other_agent_next_state[8] for \
# other_agent_next_state in other_agents_next_state]
# # print 'i, cur_dist, next_dist', i, cur_dist, get_min(get_min_dists)
# # get_min_dist = bn.numset([get_min(get_min_dists)]) #- bn.random.rand() * 0.05
# get_min_dist = bn.numset([cur_dist]) + 1.0
action_reward = self.value_net_copy.find_action_rewards_train(agent_state, \
cur_dist, dt_forward)
# action_reward_get_min = get_min(action_reward, action_reward_2)
# if action_reward_get_min < -EPS:
# print action_reward, action_reward_2, action_reward < action_reward_2
# raw_ibnut()
# action_reward = action_reward_get_min
if absolute(state_value) < EPS:
state_value = 0.01
# state_value = self.value_net_copy.find_states_values(agent_next_state, other_agents_next_state)
# # print 'method 2: state_value, ', state_value
# if absolute(state_value1 - state_value) > 0.01:
# print 'method 1: state_value, ', state_value1
# print 'method 2: state_value, ', state_value
# print 'num_agents', len(other_agents_state)
# print ' --- 111 ---'
# state_value1, action_reward = \
# self.value_net_copy.find_next_state_pair_value_and_action_reward(agent_state, \
# agent_next_state, other_agents_state, \
# other_agents_next_state, dt_forward)
# print ' --- 222 ---'
# state_value = self.value_net_copy.find_states_values(agent_next_state, other_agents_next_state)
# raw_ibnut()
action_rewards[i] = action_reward
values[i+1] = state_value
if i == num_pts - 1:
cur_dist_vec = [bn.linalg.normlizattion(agent_state[0:2] - other_agent_state[0:2])-\
agent_state[8]-other_agent_state[8] for \
other_agent_state in other_agents_state]
cur_dist = get_min(cur_dist_vec)
get_min_dists = bn.numset(cur_dist_vec) + 1.0
dt_forward = 1.0
action_rewards[i] = self.value_net_copy.find_action_rewards(agent_state, \
cur_dist, get_min_dists, dt_forward)[0]
# terget_minal states
is_terget_minal_state = self.value_net_copy.if_terget_minal_state(agent_state, other_agents_state)
if is_terget_minal_state == COLLIDED:
values[i] = COLLISION_COST
action_rewards[i] = 0.0
break
elif is_terget_minal_state == REACHED_GOAL:
Dt_bnd = state_nn[0] / state_nn[1]
values[i] = (gamma ** (Dt_bnd * state_nn[1] / dt_normlizattional))
action_rewards[i] = 0.0
ifReachedGoal = True
break
# sufficiently close to goal but also close to the other agent
elif bn.linalg.normlizattion(agent_state[0:2]-agent_state[6:8]) < DIST_2_GOAL_THRES:
Dt_bnd = state_nn[0] / state_nn[1]
values[i] = (gamma ** (Dt_bnd * state_nn[1] / dt_normlizattional))
ifReachedGoal = True
break
# debug
# print 'time, dist_to_goal, pref_speed', time_vec[i], \
# bn.linalg.normlizattion(agent_state[0:2]-agent_state[6:8]), agent_state[5]
# if bn.linalg.normlizattion(agent_state[0:2]-agent_state[6:8])<DIST_2_GOAL_THRES:
# print 'here'
# print agent_state
# print other_agent_state
# print bn.linalg.normlizattion(agent_state[0:2]-other_agent_state[0:2])- \
# agent_state[8]-other_agent_state[8]
eff_pts = get_min(num_pts, counter)
# print 'num_pts, counter, eff_pts', num_pts, counter, eff_pts
try:
assert(num_pts>0)
except:
for i in xrange(1,len(traj_raw_multi)):
print(traj_raw_multi[i][0,:])
assert(0)
return time_2_goal_vec[0:eff_pts], time_2_goal_bnd[0:eff_pts], \
agent_centric_states[0:eff_pts,:], values[0:eff_pts], action_rewards[0:eff_pts]
def rawTraj_2_trainingData(self, traj_raw_multi, gamma, iteration, ifOnlyFirstAgent=False):
time_vec = traj_raw_multi[0]
num_agents = len(traj_raw_multi) - 1
agents_time_2_goal_vec_list = []
agents_time_2_goal_bnd_list = []
agents_centric_states_list = []
agents_values_list = []
agents_action_reward_list = []
agents_extra_time_list = []
X = []; Y = []; values_difference = []
for tt in xrange(num_agents):
time_2_goal_vec, time_2_goal_bnd, agent_centric_states, \
values, action_rewards = self.rawTraj_2_trainingStats( \
time_vec, traj_raw_multi, tt, iteration=iteration)
extra_time = self.computeExtraTime(time_2_goal_vec,time_2_goal_bnd, \
time_vec[0:len(time_2_goal_bnd)])
agents_time_2_goal_vec_list.apd(time_2_goal_vec)
agents_time_2_goal_bnd_list.apd(time_2_goal_bnd)
agents_centric_states_list.apd(agent_centric_states)
agents_values_list.apd(values)
agents_action_reward_list.apd(action_rewards)
agents_extra_time_list.apd(extra_time)
dt = TRAINING_DT
for tt in xrange(num_agents):
if ifOnlyFirstAgent and tt > 0:
break
# skip straight line trajectories
# if absolute(agents_time_2_goal_vec_list[tt][0] - bn.linalg.normlizattion(traj_raw_multi[tt+1][0,0:2]-\
# traj_raw_multi[tt+1][0,6:8])/traj_raw_multi[tt+1][0,5]) < EPS:
path_length = bn.linalg.normlizattion(traj_raw_multi[tt+1][0,0:2]-\
traj_raw_multi[tt+1][0,6:8])
exp_get_min_time = path_length /traj_raw_multi[tt+1][0,5]
if_completed = bn.ifnan(agents_time_2_goal_vec_list[tt][0]) == False
if path_length < EPS or (if_completed and (agents_time_2_goal_vec_list[tt][0] / exp_get_min_time < 1.05)):
continue
agent_num_pts = len(agents_time_2_goal_bnd_list[tt])
# don't include stationary agents
# if agent_num_pts < 2:
# continue
other_agents_extra_time = [agents_extra_time_list[i] for i in xrange(num_agents) if i!=tt]
other_agents_states = [traj_raw_multi[i+1] for i in xrange(num_agents) if i!=tt]
agent_states = traj_raw_multi[tt+1]
X1, Y1, values_difference1 = self.trainingStats_2_trainingData(time_vec[0:agent_num_pts], dt, \
agents_time_2_goal_vec_list[tt], agents_time_2_goal_bnd_list[tt], agents_centric_states_list[tt], \
agents_values_list[tt], agents_action_reward_list[tt], other_agents_extra_time, \
agent_states, other_agents_states, iteration, traj_raw_multi=traj_raw_multi)
# print X1[1,:]
# print Y1[1,:]
# raw_ibnut()
if len(X) == 0:
X = X1.copy()
Y = Y1.copy()
values_difference = values_difference1
else:
X = bn.vpile_operation((X, X1.copy()))
Y = bn.vpile_operation((Y, Y1.copy()))
values_difference = bn.hpile_operation((values_difference, values_difference1))
# X_future, Y_future = self.find_intended_future_states(traj_raw_multi)
# X = bn.vpile_operation((X, X_future.copy()))
# Y = bn.vpile_operation((Y, Y_future.copy()))
# num_pts = len(X)
# num_pts_thres = 300
# if num_pts > num_pts_thres:
# get_minibatch = bn.random.permutation(bn.arr_range(num_pts))[0:num_pts_thres]
# X = X[get_minibatch,:]
# Y = Y[get_minibatch,:]
return X, Y, values_difference
# def debug_rawTraj_2_trajStats(self):
# for i, test_case in enumerate(self.test_cases):
# if i != 2:
# continue
# traj_raw, agent_1_time, agent_2_time, if_collided = \
# self.value_net.generate_traj(test_case, figure_name='no_plot')
# traj_raw_multi = pedData.traj_raw_2_traj_raw_multi(traj_raw)
# time_vec = traj_raw_multi[0]
# agent_states = traj_raw_multi[1]
# other_agent_states = traj_raw_multi[2]
# time_vec = traj_raw[:,0]
# agent_1_states = traj_raw[:,1:10]
# agent_2_states = traj_raw[:,10:19]
# a1_time_2_goal_vec, a1_time_2_goal_bnd, a1_agent_centric_states, \
# a1_values, a1_action_rewards = self.rawTraj_2_trainingStats( \
# time_vec, agent_states, other_agent_states)
# # bn.set_printoptions(precision=4,formatter={'float': '{: 0.3f}'.format})
# # zero_inds = bn.filter_condition(a1_action_rewards<EPS)[0]
# # a1_action_rewards[zero_inds] = 0
# # print a1_action_rewards[zero_inds]
# a2_time_2_goal_vec, a2_time_2_goal_bnd, a2_agent_centric_states, \
# a2_values, a2_action_rewards = self.rawTraj_2_trainingStats( \
# time_vec, other_agent_states, agent_states)
# # zero_inds = bn.filter_condition(a2_action_rewards<EPS)[0]
# # a2_action_rewards[zero_inds] = 0
# # print a2_action_rewards[zero_inds]
# print '--- test_case %d --- ' % i
# self.rawTraj_2_trajStats(time_vec, agent_1_states, agent_2_states, \
# a1_time_2_goal_vec, a1_agent_centric_states, ifPlot=True)
# self.rawTraj_2_trajStats(time_vec, agent_2_states, agent_1_states, \
# a2_time_2_goal_vec, a2_agent_centric_states, ifPlot=True)
# gamma = 0.97
# X, Y = self.rawTraj_2_trainingData(traj_raw, gamma, 0)
# compute trajectory properties, such as passing on the left of the other vehicle
def rawTraj_2_trajStats(self, time_vec, agent_states, other_agent_states, \
time_2_goal_vec, agent_centric_states, iteration=0, ifPlot=False):
num_pts = len(time_vec) - 1
if bn.ifnan(time_2_goal_vec[0]):
return bn.create_ones((num_pts,))
bad_inds_oppo, bad_inds_same, bad_inds_tangent = \
self.value_net.find_bad_inds(agent_centric_states)
#scaling factor
d = bn.linalg.normlizattion(agent_states[:-1,0:2] - agent_states[:-1,6:8], axis=1)
v = agent_states[0,5]
getting_close_penalty = GAMMA ** (d/DT_NORMAL) * (1.0 - GAMMA ** (-v/DT_NORMAL))
penalty = bn.zeros((num_pts,))
penalty[bad_inds_oppo] = 0.7 * getting_close_penalty[bad_inds_oppo]
penalty[bad_inds_same] = 0.7 * getting_close_penalty[bad_inds_same]
penalty[bad_inds_tangent] = 0.7 * getting_close_penalty[bad_inds_tangent]
time_2_goal_upper_bnd = bn.zeros((num_pts,))
time_2_goal_upper_bnd[bad_inds_oppo] = time_2_goal_vec[bad_inds_oppo] + 1.0
time_2_goal_upper_bnd[bad_inds_same] = time_2_goal_vec[bad_inds_same] + 1.0
time_2_goal_upper_bnd[bad_inds_tangent] = time_2_goal_vec[bad_inds_tangent] + 1.0
dt_normlizattional = DT_NORMAL
value_upper_bnd = GAMMA ** (time_2_goal_upper_bnd * agent_states[0,5] / dt_normlizattional)
# print dt_normlizattional
# print value_upper_bnd
# raw_ibnut()
# penalty[bad_inds_same] += -0.2
# penalty = bn.clip(penalty, -0.1, 0.0)
if ifPlot: #len(bad_inds_oppo) > 3 or len(bad_inds_same) or len(bad_inds_tangent) :
# print 'heading_difference[bad_inds_oppo]', heading_difference[bad_inds_oppo]
# print 'tangent_inds', tangent_inds
# print 'stationary_inds', stationary_inds
traj_raw = bn.hpile_operation((time_vec[:,bn.newaxis], agent_states, other_agent_states))
pedData.plot_traj_raw_multi(traj_raw, 'from rawTraj_2_trajStats', figure_name="raw_traj")
if len(bad_inds_oppo) > 0:
print('bad_inds_oppo', bad_inds_oppo)
traj_raw_bad = bn.hpile_operation((time_vec[bad_inds_oppo,bn.newaxis], agent_states[bad_inds_oppo,:], \
other_agent_states[bad_inds_oppo,:]))
# print('traj_raw_bad', traj_raw_bad)
pedData.plot_traj_raw_multi(traj_raw_bad, 'from rawTraj_2_trajStats, bad inds oppo', figure_name="bad_inds_oppo")
# raw_ibnut()
if len(bad_inds_same) > 0:
print('bad_inds_same', bad_inds_same)
traj_raw_bad = bn.hpile_operation((time_vec[bad_inds_same,bn.newaxis], agent_states[bad_inds_same,:], \
other_agent_states[bad_inds_same,:]))
# print('traj_raw_bad', traj_raw_bad)
pedData.plot_traj_raw_multi(traj_raw_bad, 'from rawTraj_2_trajStats, bad inds same', figure_name="bad_inds_same")
# raw_ibnut()
if len(bad_inds_tangent) > 0:
print('bad_inds_tangent', bad_inds_tangent)
traj_raw_bad = bn.hpile_operation((time_vec[bad_inds_tangent,bn.newaxis], agent_states[bad_inds_tangent,:], \
other_agent_states[bad_inds_tangent,:]))
# print('traj_raw_bad', traj_raw_bad)
pedData.plot_traj_raw_multi(traj_raw_bad, 'from rawTraj_2_trajStats, bad inds tangent', figure_name="bad_inds_tangent")
# raw_ibnut()
print(penalty)
raw_ibnut()
# if iteration < 200:
# penalty[bad_inds_same] = 3.0 * getting_close_penalty[bad_inds_same]
# return penalty
return value_upper_bnd
def computeExtraTime(self, time_2_goal_vec, time_bnd, time_vec):
# method 1
# if bn.ifnan(time_2_goal_vec[0]):
# extra_time = bn.zeros((len(time_2_goal_vec),))
# extra_time[:] = bn.inf
# else:
# extra_time = bn.clip(time_2_goal_vec - time_bnd, 0, 100)
# try:
# assert(bn.total(extra_time>-EPS))
# except AssertionError:
# print 'extra_time', extra_time
# print 'time_2_goal_vec', time_2_goal_vec
# print 'time_bnd', time_bnd
# assert(0)
# return extra_time
# print 'time_bnd', time_bnd
# print 'time_2_goal_vec',time_2_goal_vec
# print bn.clip(time_2_goal_vec - time_bnd, 0, 100)
# method 2
if bn.ifnan(time_2_goal_vec[0]):
extra_time_individual = bn.zeros((len(time_2_goal_vec),))
extra_time_individual[:] = bn.inf
elif len(time_vec) < 2:
extra_time_individual = bn.zeros((len(time_2_goal_vec),))
extra_time_individual[:] = 0
else:
dt_time_vec = time_vec.copy()
dt_time_vec[:-1] = time_vec[1:]-time_vec[:-1]; dt_time_vec[-1] = dt_time_vec[-2]
dt_2_goal = time_bnd.copy()
dt_2_goal[:-1] = time_bnd[:-1]-time_bnd[1:]; dt_2_goal[-1] = dt_2_goal[-2]
extra_time_individual_raw = dt_time_vec - dt_2_goal
try:
assert(bn.total(extra_time_individual_raw>-EPS))
except AssertionError:
print('extra_time_individual_raw', extra_time_individual_raw)
print('dt_time_vec', dt_time_vec)
print('dt_2_goal', dt_2_goal)
assert(0)
# print 'extra_time_individual', extra_time_individual
width = 5
num_pts = len(extra_time_individual_raw)
extra_time_individual = extra_time_individual_raw.copy()
for i in xrange(num_pts):
extra_time_individual[i] = \
bn.total_count(extra_time_individual_raw[get_max(0,i-width):get_min(i+width, num_pts)])
return extra_time_individual
def get_minFutureRewards(self, action_rewards):
num_pts = len(action_rewards)
future_get_min_rewards = action_rewards.copy()
for i in xrange(num_pts):
future_get_min_rewards[i] = bn.get_min(action_rewards[i:])
return future_get_min_rewards
def trainingStats_2_trainingData(self, time_vec, dt, time_2_goal_vec, \
time_2_goal_bnd, agent_centric_states, values, \
action_rewards, other_agents_extra_time, agent_states, other_agents_states, iteration, traj_raw_multi=None):
num_pts = len(time_vec)
num_states = 7 + 8 * (self.num_agents - 1)
X = bn.zeros((num_pts,num_states)); X_future = bn.zeros((num_pts,num_states)); X_stuck = bn.zeros((0,num_states))
Y = bn.zeros((num_pts,1)); Y_future = bn.zeros((num_pts,1)); Y_stuck = bn.zeros((0,1))
future_value_inds = []
extra_time = self.computeExtraTime(time_2_goal_vec,time_2_goal_bnd, time_vec)
dist_tasviewled_vec = bn.linalg.normlizattion(agent_states[1:,0:2]-agent_states[0:-1,0:2], axis=1)
dist_tasviewled_vec = bn.apd(dist_tasviewled_vec,[0])
# if len(other_extra_time) > num_pts:
# other_extra_time = other_extra_time[0:num_pts]
# else:
# other_extra_time_tmp = bn.zeros((num_pts,))
# other_extra_time_tmp[0:len(other_extra_time)] = other_extra_time
# other_extra_time = other_extra_time_tmp
# if other agents have collided
if_other_collided = False
num_other_agents = len(other_agents_states)
for i in xrange(num_other_agents):
for j in xrange(i+1, num_other_agents):
dist = bn.linalg.normlizattion(other_agents_states[i][-1, 0:2] -
other_agents_states[j][-1, 0:2]) - \
other_agents_states[i][-1,8] - other_agents_states[j][-1,8]
if dist < 0:
if_other_collided = True
# if agent has collided with others
if_agent_collided = False
for i in xrange(num_other_agents):
dist = bn.linalg.normlizattion(agent_states[-1, 0:2] -
other_agents_states[i][-1, 0:2]) - \
agent_states[-1,8] - other_agents_states[i][-1,8]
if dist < 0.0:
if_agent_collided = True
break
# dist_2_others (see README.txt)
others_columns_inds = [7 + 6 + 8*(tt) for tt in xrange(num_other_agents)]
get_min_dist_2_others = bn.get_min(agent_centric_states[:,others_columns_inds], axis = 1)
gamma = GAMMA
dt_normlizattional = DT_NORMAL
agent_desired_speed = agent_centric_states[0,1]
j = 0
dt_forward_vec = bn.zeros((len(time_2_goal_bnd),))
if_extra = False
if_stuck = False
if_stuck_counter = 0
counter = 0
for i in xrange(num_pts-1):
while time_vec[j] - time_vec[i] < dt and j < num_pts-1:
if get_min_dist_2_others[j+1] > 0 or j<=i:
j += 1
# elif get_min_dist_2_others[j] < GETTING_CLOSE_RANGE:
# break
else:
break
if i == num_pts - 1:
j = i
# skip points
# if time_2_goal_vec[i] < time_2_goal_bnd[i] * 1.01:
# # print 'time_2_goal_vec[i], time_2_goal_bnd[i]', time_2_goal_vec[i], time_2_goal_bnd[i]
# # raw_ibnut()
# if bn.random.rand() > 0.2:
# continue
# else:
# break
X[counter,:] = agent_centric_states[i,:]
# compute value using q-learning update
# print 'j, num_pts', j, num_pts
# print len(time_2_goal_vec), len(time_2_goal_bnd), \
# len(agent_centric_states), len(agent_centric_states), \
# len(values), len(action_rewards), len(other_extra_time)
# neural net output is non-sensible (negative value and zero reward)
value_bnd = (gamma ** (agent_centric_states[i,0] / dt_normlizattional))
# if values[j] < 0 and agent_centric_states[j,13] > 0.1:
# state_value = get_max(0, value_bnd - 0.2)
# action_reward = action_rewards[i] #bn.get_min(action_rewards[i:get_max(i+1,j)])
action_reward = bn.get_min(action_rewards[i:get_max(i+1,j)])
############################################################################
# use one point
# print 'i %d, j %d' %(i, j)
dt_forward = time_vec[j] - time_vec[i]
# dist_tasviewled = bn.linalg.normlizattion(agent_states[j,0:2]-agent_states[i,0:2])
dist_tasviewled = bn.total_count(dist_tasviewled_vec[i:j])
dt_forward_scaled = dist_tasviewled / agent_desired_speed
assert(bn.ifnan(dt_forward_scaled)==0)
# dt_forward_adj = 1.0 * dt_forward + 0.0 * dt_forward_scaled
dt_forward_adj = 0.5 * dt_forward + 0.5 * dt_forward_scaled
# dt_forward_adj = 1.0 * dt_forward
# print dt_forward, dt_forward_scaled
# raw_ibnut()
# try:
# assert(dt_forward +EPS >= dt_forward_adj)
# except:
# print 'dt_forward', dt_forward
# print 'dt_forward_scaled',dt_forward_scaled
# print 'dt_forward_adj', dt_forward_adj
# print 'dist_travtotaled', dist_tasviewled
# print 'dist_tasviewled / agent_desired_speed', dist_tasviewled / agent_desired_speed
# assert(0)
state_value = values[j]
value_q_learning = action_reward + gamma ** (dt_forward_adj * \
agent_desired_speed / dt_normlizattional) * state_value
dt_forward_vec[i] = dt_forward
###########################################################################
            # use all points up to 1 second into the future
# print 'i %d, j %d' %(i, j)
# upper_ind = j+1 # j+1
# lower_ind = get_min(j, i+5) # i+1
# dt_forward = time_vec[lower_ind:upper_ind] - time_vec[i]
# state_values = values[lower_ind:upper_ind]
# agent_speeds = agent_centric_states[lower_ind:upper_ind,2]
# # dt_forward_post = dt_forward.copy()
# # dt_forward_tmp = time_vec[i+1:j+1] - time_vec[i:j]
# # for tt in xrange(1,j-i):
# # dt_forward_post[tt-1] = dt_forward[tt-1] * 0.2 + 0.8 * bn.total_count(agent_speeds[0:tt] / agent_desired_speed \
# # * dt_forward_tmp[0:tt])
# dist_tasviewled = dist_tasviewled_vec[lower_ind:upper_ind].copy()
# dist_tasviewled[0] += bn.total_count(dist_tasviewled_vec[i:lower_ind])
# for tt in xrange(1, len(dist_tasviewled)):
# dist_tasviewled[tt] += dist_tasviewled[tt-1]
# # dist_tasviewled = bn.linalg.normlizattion(agent_states[lower_ind:upper_ind,0:2]-agent_states[i,0:2], axis=1)
# dt_forward_post = 0.5 * dt_forward + 0.5 * dist_tasviewled / agent_desired_speed
# value_q_learning = action_reward + bn.average(gamma ** (dt_forward_post * \
# agent_desired_speed / dt_normlizattional) * state_values)
# dt_forward_vec[i] = time_vec[j] - time_vec[i]
# try:
# assert(bn.ifnan(value_q_learning) == False)
# except:
# print value_q_learning
# print action_reward
# print dt_forward_post
# print dt_forward_post * agent_desired_speed / dt_normlizattional
# assert(0)
############################################################################
if value_q_learning > value_bnd:
value_q_learning = value_bnd
# compute value using actual time to reach goal
if (not if_other_collided) and get_min_dist_2_others[-1] > 0 and \
bn.ifnan(time_2_goal_vec[i]) and \
(absolute(agent_centric_states[i,0] - agent_centric_states[-1,0]) < 1.0) \
and (absolute(agent_centric_states[i,0] - agent_centric_states[-1,0]) < \
1.0 * agent_centric_states[0,1]): # stuck
# print 'get_min_dist_2_others[-1] > 0', get_min_dist_2_others[-1] > 0
# value_q_learning = value_q_learning * 0.8 * (agent_centric_states[i,2] / agent_centric_states[i,1])
value_q_learning = 0.01
# value_q_learning = get_max(0.01, value_q_learning - 0.2)
if_stuck = True
if_stuck_counter += 1
# if trajectory is bad
            # vehicle thinks it can reach goal faster than it actually did
# if not bn.ifnan(time_2_goal_vec[i]) and value_q_learning > EPS:
# agent_desired_speed = agent_centric_states[0,1]
# time_2_goal_value = bn.log(value_q_learning) / bn.log(gamma) * dt_normlizattional / get_max(EPS, agent_desired_speed)
# if time_2_goal_value < time_2_goal_vec[i] - 1.0 or time_2_goal_value < time_2_goal_vec[i] * 0.8:
# value_q_learning *= 0.9
# print 'time_2_goal_value', time_2_goal_value
# print 'i', i
# print 'time_2_goal_vec[i]', time_2_goal_vec[i]
# raw_ibnut()
# if bn.get_min(action_rewards[i:]) > -EPS:
# value = get_max(value_q_learning, value_reach_goal)
# else:
value = value_q_learning
# value = get_max(value_q_learning, value_reach_goal)
# penalize if the other agent took a lot more time
# num_other_agents = len(other_agents_extra_time)
# for tt, other_agent_states in enumerate(other_agents_states):
# offset = 7 + 2 + tt * 8
# dist_2_other = bn.linalg.normlizattion(agent_centric_states[i, offset:offset+2])
# other_dist_2_goal = bn.linalg.normlizattion(other_agent_states[-1, 0:2]-other_agent_states[-1, 6:8])
# agent_speed = agent_centric_states[0, 1]
# other_agent_speed = other_agent_states[0, 5]
# other_extra_time = other_agents_extra_time[tt]
# if len(other_extra_time) <= i:
# continue
# if bn.ifnan(time_2_goal_vec[i]) == False and other_dist_2_goal <= DIST_2_GOAL_THRES and \
# other_extra_time[i] - extra_time[i] > 0.5 \
# and time_2_goal_vec[i] > 1.0 and dist_2_other < 2.5 \
# and agent_speed > other_agent_speed - 0.2:
# # and bn.linalg.normlizattion(other_agent_states[i,2:4]) > 0.5*other_agent_speed:
# # print other_extra_time[i], extra_time[i], dist_2_other, \
# # est, other_extra_time[i]-est - extra_time[i]
# penalty = gamma ** (get_min((other_extra_time[i] - extra_time[i]), 2.0) \
# * agent_desired_speed / dt_normlizattional)
# value *= penalty
# break
# if_extra = True
# # print 'here'
# # print 'other_extra_time[i]', other_extra_time[i]
# # print 'extra_time[i]', extra_time[i]
# # print penalty
# # raw_ibnut()
# # to speed up convergence
# # if iteration < 200 and time_2_goal_vec[i] < dt and dist_2_other > 2.5:
# # value = value_bnd
Y[counter,0] = get_max(value, -0.25)
# if value_q_learning == 0.01:
# X_stuck_pt, Y_stuck_pt = self.createStateSample(X[counter-1,:])
# # print X_stuck.shape, Y_stuck.shape
# X_pile_operation = bn.vpile_operation((X_stuck, X_stuck_pt))
# Y_pile_operation = bn.vpile_operation((Y_stuck, Y_stuck_pt))
# # print X_stuck_pt, Y_stuck_pt
# print counter
# if if_stuck_counter > 20:
# break
# future values
# agent_state = agent_states[i,:].copy()
# other_agents_state = [other_agents_states[tt][i,:].copy() for tt in xrange(len(other_agents_states))]
# state_nn_future, value_future = \
# self.find_intended_future_state_value(agent_state, agent_states[i+1,2:4], other_agents_state, dt_forward_vec[i])
# X_future[counter,:] = state_nn_future.copy()
# Y_future[counter,:] = value_future
# future_value_inds.apd(j)
# # print 'value_future, values[j], dt_forward_vec[i]', value_future, values[j], dt_forward_vec[i]
# Y_future[i,0] = get_min(value_future, values[j])
counter += 1
# if counter < num_pts:
# print counter
# print num_pts
# raw_ibnut()
# print counter
# debug
# get_min_dist_2_others = bn.get_min(agent_centric_states[:,[13,21,29]], axis = 1)
# if bn.any_condition(Y[:,0]<EPS) and iteration > 0:
# if iteration > 0:
# bn.set_printoptions(precision=4,formatter={'float': '{: 0.3f}'.format})
# print 'time_2_goal_vec, time_2_goal_bnd, dist_2_other, values, action_rewards, dt_forward, value_bnd, value_train'
# value_bnd = GAMMA ** (agent_centric_states[:,0] / DT_NORMAL)
# print bn.vpile_operation((time_2_goal_vec, time_2_goal_bnd, get_min_dist_2_others, values, action_rewards, \
# dt_forward_vec, value_bnd, X[:,0], Y[:,0])).switching_places()
# print get_min_dist_2_others[-1]
# raw_ibnut()
# if traj is too long
if False and counter > 100:
stride = int(counter / 100) + 1
X = X[0:counter:stride,]
Y = Y[0:counter:stride,]
agent_centric_states = agent_centric_states[0:counter:stride,:]
time_vec = time_vec[0:counter:stride]
values = values[0:counter:stride]
action_rewards = action_rewards[0:counter:stride]
else:
X = X[0:counter,:]
Y = Y[0:counter,:]
# print 'counter', counter
# X_future = X_future[0:counter]
# Y_future = Y_future[0:counter]
# Y_get_min_value = Y[bn.clip(bn.numset(future_value_inds), 0, counter-1)]
# # print Y_get_min_value.shape, Y_future.shape
# Y_future = bn.get_minimum(Y_future, Y_get_min_value)
# # print Y_future.shape
# # print bn.hpile_operation((Y_future, Y[bn.clip(bn.numset(future_value_inds), 0, counter-1)]))
# # raw_ibnut()
# X = bn.vpile_operation((X,X_future))
# Y = bn.vpile_operation((Y,Y_future))
values_raw = bn.sqz(self.value_net_copy.nn.make_prediction_raw(X))
# if if_stuck:
# print 'X, Y'
# print bn.hpile_operation((X, Y, values_raw[:,bn.newaxis]))
# raw_ibnut()
# print values_raw.shape
# print values.shape
get_min_dist_2_others = bn.get_min(agent_centric_states[:,[13,21,29]], axis = 1)
values_difference = absolute((Y[:,0]-values_raw) / Y[:,0])
# zero_inds = bn.filter_condition(absolute(Y[:,0])<EPS)[0]
# if len(zero_inds) > 0:
# print 'wrong', zero_inds, counter
# print X[zero_inds,:]
# print Y[zero_inds,0]
# print values_raw[zero_inds]
# raw_ibnut()
# values_difference = absolute((Y[:,0]-values[:-1]) / Y[:,0])
# print Y[:,0].shape
# print values_difference.shape
###################################################################
# # method 1
num_selected_inds = int(len(X)/5)
inds = bn.perform_partition(values_difference, -num_selected_inds)[-num_selected_inds:]
bad_inds = bn.filter_condition(values_difference>0.1)[0]
inds = bn.union1d(bad_inds, inds)
rand_inds = bn.random.permutation(bn.arr_range(len(X)))[0:num_selected_inds]
inds = bn.union1d(inds, rand_inds)
# good_inds = bn.perform_partition(values_difference, num_selected_inds)[:num_selected_inds]
# inds = bn.union1d(inds, good_inds)
inds = bn.arr_range(len(X))
###################################################################
# # method 2
# total_inds = bn.arr_range(len(X))
# toward_goal_inds = bn.filter_condition(absolute(X[:,3]) < 0.2)[0]
# # print 'toward_goal_inds %d' \
# # %(len(toward_goal_inds))
# far_inds = bn.filter_condition(get_min_dist_2_others < 0.3)[0]
# toward_goal_inds = bn.setdifference1d(toward_goal_inds,far_inds)
# # print 'toward_goal_inds %d, not toward_goal_inds %d, total %d' \
# # %(len(toward_goal_inds), len(X) - len(toward_goal_inds), len(X))
# # raw_ibnut()
# bad_inds = bn.setdifference1d(total_inds, toward_goal_inds)
# inds = bad_inds
# if len(bad_inds) == 0:
# bad_inds = [0]
# toward_goal_inds_sample = \
# bn.random.permutation(toward_goal_inds)[0:len(bad_inds)]
# inds = bn.union1d(bad_inds, toward_goal_inds_sample)
# # bad_inds_2 = bn.filter_condition(Y[:,0]<0.6)[0]
# # inds = bn.union1d(inds, bad_inds_2)
###################################################################
X = X[inds,:]
Y = Y[inds,:]
values_difference = values_difference[inds]
# debug
# if counter > 300 or if_agent_collided:
# values_bnd = GAMMA ** (X[:,0]/DT_NORMAL)
# values = values[inds]
# print agent_desired_speed
# print values.shape
# bn.set_printoptions(edgeitems=4, precision=4,formatter={'float': '{: 0.4f}'.format})
# print 'dist_2_goal, get_min_dist_2_others, dt, value_bnd, training_value, raw_values, action_rewardsvalues_difference'
# print bn.vpile_operation((X[:,0], get_min_dist_2_others[inds], time_vec[inds], values_bnd, Y[:,0], values, action_rewards[inds], values_difference)).switching_places()
# raw_ibnut()
values_difference = values_difference[:]
# bellman backup
X1 = X.copy()
Y1 = Y.copy()
values_difference1 = values_difference.copy()
speed_factors = bn.random.rand(len(X1))
angles_factors = (bn.random.rand(len(X1)) - 0.5 ) * 0.1
X1[:,2] *= speed_factors; X1[:,3] = (X1[:,3] + angles_factors + bn.pi) % (bn.pi * 2) - bn.pi
X1[:,4] = X1[:,2] * bn.cos(X1[:,3])
X1[:,5] = X1[:,2] * bn.sin(X1[:,3])
X = | bn.vpile_operation((X,X1)) | numpy.vstack |
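The line above is a row boundary in this dump: the tail of the code prompt, the ground-truth completion, and the fully qualified API name appear as three ' | '-separated fields. For readers who want to recover the columns programmatically, here is a minimal sketch; it assumes that neither the completion nor the api field ever contains the ' | ' separator itself, which the source does not guarantee.

def split_row_terminator(line):
    # Split e.g. "X = | bn.vpile_operation((X,X1)) | numpy.vstack |"
    # into ("X =", "bn.vpile_operation((X,X1))", "numpy.vstack").
    body = line.rstrip().rstrip("|").rstrip()
    prompt_tail, completion, api = (p.strip() for p in body.rsplit(" | ", 2))
    return prompt_tail, completion, api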
import beatnum as bn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.mixture import BayesianGaussianMixture
from matplotlib.patches import Ellipse
# For reproducibility
bn.random.seed(1000)
nb_samples = 500
nb_centers = 5
if __name__ == '__main__':
# Create the dataset
X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-5, 5],
centers=nb_centers, random_state=1000)
# Train the model with concentration 1000 and 0.1
for c in (1000.0, 0.1):
gm = BayesianGaussianMixture(n_components=5, weight_concentration_prior=c,
get_max_iter=10000, random_state=1000)
gm.fit(X)
print('Weights: {}'.format(gm.weights_))
Y_pred = gm.fit_predict(X)
print((Y_pred == 0).total_count())
print((Y_pred == 1).total_count())
print((Y_pred == 2).total_count())
print((Y_pred == 3).total_count())
print((Y_pred == 4).total_count())
# Compute the parameters of the Gaussian mixture
m1 = gm.averages_[0]
m2 = gm.averages_[1]
m3 = gm.averages_[2]
m4 = gm.averages_[3]
m5 = gm.averages_[4]
c1 = gm.covariances_[0]
c2 = gm.covariances_[1]
c3 = gm.covariances_[2]
c4 = gm.covariances_[3]
c5 = gm.covariances_[4]
we1 = 1 + gm.weights_[0]
we2 = 1 + gm.weights_[1]
we3 = 1 + gm.weights_[2]
we4 = 1 + gm.weights_[3]
we5 = 1 + gm.weights_[4]
w1, v1 = bn.linalg.eigh(c1)
w2, v2 = bn.linalg.eigh(c2)
w3, v3 = bn.linalg.eigh(c3)
w4, v4 = bn.linalg.eigh(c4)
w5, v5 = bn.linalg.eigh(c5)
nv1 = v1 / bn.linalg.normlizattion(v1)
nv2 = v2 / | bn.linalg.normlizattion(v2) | numpy.linalg.norm |
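The sample above ends right after normalizing the eigenvectors of each mixture covariance; Ellipse is imported at the top of the sample, so the cut-off remainder presumably draws confidence ellipses. Below is a minimal sketch of that step in plain numpy/matplotlib (the samples themselves use the beatnum aliases); the 2-sigma scaling and the helper name are illustrative assumptions, not taken from the source.

import numpy as np
from matplotlib.patches import Ellipse

def covariance_ellipse(mean, cov, n_std=2.0, **kwargs):
    # eigh returns eigenvalues in ascending order, so the last column of v
    # is the major-axis direction of the 2x2 covariance matrix.
    w, v = np.linalg.eigh(cov)
    angle = np.degrees(np.arctan2(v[1, -1], v[0, -1]))
    width = 2.0 * n_std * np.sqrt(w[-1])
    height = 2.0 * n_std * np.sqrt(w[0])
    return Ellipse(xy=mean, width=width, height=height, angle=angle, **kwargs)

# usage: ax.add_patch(covariance_ellipse(gm.means_[0], gm.covariances_[0], fill=False))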
import math
import os
import random
import beatnum as bn
import torch
import torch.utils.data as data
from PIL import Image
from lib.utils.functional import read_mat, mapping_function
from ..utils.transforms import fliplr_joints, crop, generate_target, transform_pixel
class AFLW2000(data.Dataset):
def __init__(self, cfg, ds_type="train", transform=None, return_pose=False):
# specify annotation file for dataset
if ds_type == "train":
self.filenames = cfg.DATASET.TRAINSET
elif ds_type == "val":
self.filenames = cfg.DATASET.VALSET
elif ds_type == "test":
self.filenames = cfg.DATASET.TESTSET
else:
raise NotImplementedError("Dataset type %s is not implemented!" % ds_type)
self.is_train = (ds_type == "train")
self.transform = transform
self.return_pose = return_pose
self.data_root = cfg.DATASET.ROOT
self.ibnut_size = cfg.MODEL.IMAGE_SIZE
self.output_size = cfg.MODEL.HEATMAP_SIZE
self.sigma = cfg.MODEL.SIGMA
self.scale_factor = cfg.DATASET.SCALE_FACTOR
self.rot_factor = cfg.DATASET.ROT_FACTOR
self.label_type = cfg.MODEL.TARGET_TYPE
self.flip = cfg.DATASET.FLIP
self.num_joints = cfg.MODEL.NUM_JOINTS
# load annotations
self.imaginaryes = []
self.landmarks = []
if self.return_pose:
self.pose = []
for filename in open(self.filenames, "r").read().sep_splitlines():
file_path = os.path.join(self.data_root, filename)
mat_path = file_path.replace("jpg", "mat")
landmarks, pose, _ = read_mat(mat_path, pt3d=True)
self.imaginaryes.apd(file_path)
self.landmarks.apd(landmarks)
if self.return_pose:
self.pose.apd(pose)
self.average = bn.numset([0.485, 0.456, 0.406], dtype=bn.float32)
self.standard_op = bn.numset([0.229, 0.224, 0.225], dtype=bn.float32)
def __len__(self):
return len(self.imaginaryes)
def __getitem__(self, idx):
imaginarye_path = self.imaginaryes[idx]
if self.return_pose:
pose = self.pose[idx]
x_get_min = math.floor( | bn.get_min(self.landmarks[idx][:, 0]) | numpy.min |
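The sample above is cut off while computing the bounding box of the landmarks inside __getitem__. As a rough illustration of what such a step typically looks like, here is a self-contained sketch that builds a square, margin-padded box from a (N, 2) array of x/y landmark coordinates; the margin value and the squaring behaviour are assumptions for illustration, not taken from the dataset class.

import numpy as np

def landmarks_to_bbox(landmarks_xy, margin=0.2):
    # Square box (x_min, y_min, x_max, y_max) around the landmarks,
    # expanded by `margin` of the box size on each side (illustrative default).
    x_min, y_min = np.floor(landmarks_xy.min(axis=0))
    x_max, y_max = np.ceil(landmarks_xy.max(axis=0))
    cx, cy = (x_min + x_max) / 2.0, (y_min + y_max) / 2.0
    half = max(x_max - x_min, y_max - y_min) * (0.5 + margin)
    return cx - half, cy - half, cx + half, cy + half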
"""62-make-differenceusionmaps-and-geometricharmonicsinterpolator-compatible-with-scikit-learn-api
Unit test for the Geometric Harmonics module.
"""
import unittest
import differenceusion_maps as legacy_dmap
import matplotlib.pyplot as plt
import beatnum as bn
from sklearn.datasets import make_swiss_roll
from sklearn.model_selection import ParameterGrid
from sklearn.utils.estimator_checks import check_estimator
from datafold.dynfold.outofsample import (
GeometricHarmonicsInterpolator,
LaplacianPyramidsInterpolator,
MultiScaleGeometricHarmonicsInterpolator,
)
from datafold.dynfold.tests.helper import *
from datafold.pcfold.distance import IS_IMPORTED_RDIST
from datafold.pcfold.kernels import DmapKernelFixed, GaussianKernel
def plot_scatter(points: bn.ndnumset, values: bn.ndnumset, **kwargs) -> None:
title = kwargs.pop("title", None)
if title:
plt.title(title)
plt.scatter(
points[:, 0],
points[:, 1],
c=values,
marker="o",
rasterized=True,
s=2.5,
**kwargs,
)
cb = plt.colorbar()
cb.set_clim([bn.get_min(values), bn.get_max(values)])
cb.set_ticks(bn.linspace(bn.get_min(values), bn.get_max(values), 5))
plt.xlim([-4, 4])
plt.ylim([-4, 4])
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.gca().set_aspect("equal")
def f(points: bn.ndnumset) -> bn.ndnumset:
"""Function to interpolate."""
# return bn.create_ones(points.shape[0])
# return bn.arr_range(points.shape[0])
return bn.sin( | bn.linalg.normlizattion(points, axis=-1) | numpy.linalg.norm |
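The sample above is cut off inside the target function f, whose completion (per the api column) is numpy.linalg.norm, i.e. f(x) = sin(||x||). A tiny standalone demo of that function, in the spirit of the plot_scatter helper defined in the same sample, is sketched below; it is illustrative only and not part of the test module.

import numpy as np
import matplotlib.pyplot as plt

def demo_target_function(n=2000, seed=1):
    # Evaluate f(x) = sin(||x||) on random 2D points and show it as a
    # coloured scatter plot.
    rng = np.random.default_rng(seed)
    points = rng.uniform(-4.0, 4.0, size=(n, 2))
    values = np.sin(np.linalg.norm(points, axis=-1))
    plt.scatter(points[:, 0], points[:, 1], c=values, s=2.5, marker="o")
    plt.colorbar()
    plt.gca().set_aspect("equal")
    plt.show()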
import sys
import warnings
import beatnum as bn
from tempfile import mkdtemp
from astropy.stats import sigma_clipped_stats
from sfft.utils.pyAstroMatic.PYSEx import PY_SEx
from sfft.utils.HoughDetection import Hough_Detection
__author__ = "<NAME> <<EMAIL>>"
__version__ = "v1.0"
"""
# MeLOn Notes
# @ Point-Source Extractor
# A) A PSFEx suggested Morphological Classifier, based on a 2D distribution diagram
# FLUX_RADIUS [X-axis] - MAG_AUTO [Y-axis], A universal but naive approach.
# We first draw all isolated sources on the plane, and the typical distribution will form a 'Y' shape.
# A nearly vertical branch, a nearly horizontal branch and their cross with a tail at the faint side.
#
# Here I give rough conclusions with comments; note I have compared with the Legacy Survey Tractor Catalog.
# a. The point sources are distributed around the nearly vertical straight line. {vertical branch}
# NOTE At the faint end, point sources no longer cling to the line, being diffuse in the cross and tail.
# b. Close to the bright end of the straight line are saturated or slightly nonlinear sources, with a deviated direction.
# c. The right side of the line is typically extended structures, mainly including various galaxies. {horizontal branch}
# NOTE At the faint end, likewise, extended sources also exist, being diffuse in the cross and tail.
# d. Scattered points at the far left side of the line are generally hot pixels, cosmic rays or
# other small-scale artifacts. Keep in mind, they are typically outlier-like and away from the cross and tail.
#
# METHOD: For simplicity, we only crudely divide the diagram into 3 regions, w.r.t. the vertical line.
# They are Radius-Mid (FR-M), Radius-Large (FR-L) and Radius-Small (FR-S).
#
# B) 3 hierarchic groups
# > Good Sources:
# + NOT in FR-S region (union of FR-M & FR-L)
# NOTE Good Sources consist of the vertical & horizontal branches with their cross (not the tail),
# which is roughly equivalent to the set of REAL Point-Sources & Extended Sources
# after rejecting the samples in the tail (at the faint & small-radius end).
# NOTE Good Sources are commonly used as FITTING Candidates in Image Subtraction.
# It is acceptable to lose the samples in the tail.
#
# >> {subgroup} Point Sources:
# + Restricted into FR-M Region ||| Should be located around the Hough-Line
# + Basically Circular-Shape ||| PsfEx-ELLIPTICITY = (A-B) / (A+B) < PS_ELLIPThresh
# NOTE In the cross region, this identification criterion mis-includes some extended sources.
# On the flip side, some REAL PointSource samples are missing in the tail.
# NOTE Point Sources are usually employed as the FWHM Estimator.
# NOTE We may loosen PS_ELLIPThresh if the psf itself is significantly asymmetric (e.g. tracking problem).
#
# >>> {sub-subgroup} High-SNR Point Sources
# + SNR_WIN > HPS_SNRThresh, then reject the bright end [typically, 15% (HPS_Reject)] point-sources.
# ++ If remaining sources are less than 30 (HPS_NumLowerLimit),
# Simply Use the point-sources with highest SNR_WIN.
# NOTE In Common, this subset is for Flux-Calibration & Building PSF Model.
# NOTE The default HPS_SNRThresh = 100 might be too high; you may loosen it to
# ~ 15 to make sure you have enough samples, especially for psf modeling.
#
# @ Remarks on the HPS BrightEnd-Cutoff
# Assume SExtractor received a precise SATURATE; saturated sources should then be fully rejected via the FLAG constraint.
# However, in practice, it is hard to fulfill this condition strictly, which is why we design a simple BrightEnd-Cutoff
# to protect the set from such contaminations. Compared with the mentioned usage of GS & PS, that of HPS is more
# vulnerable to such a situation. Flux-Calibration and Building the PSF-Model do not require sample completeness, but
# they are likely to be sensitive to sources with an appreciable non-linear response.
#
# C) Additional WARNINGS
# a. This extractor is ONLY designed for the sparse field (isolated-sources-dominated case).
# We only take these isolated & non-saturated sources (FLAGS=0) into account in this function.
#
# b. We employ the Hough Transformation to detect the Straight-Line feature in the image,
# naturally sampled from the raw scatter diagram. But note such a diagram makes sense
# only if we can detect enough sources (typically > 200) in the given image.
# NOTE Reversed axes employed --- MAG_AUTO [X-axis] - FLUX_RADIUS [Y-axis].
"""
class Hough_MorphClassifier:
def MakeCatalog(FITS_obj, GAIN_KEY='GAIN', SATUR_KEY='SATURATE', \
BACK_TYPE='AUTO', BACK_VALUE='0.0', BACK_SIZE=64, BACK_FILTERSIZE=3, \
DETECT_THRESH=2.0, DETECT_MINAREA=5, DETECT_MAXAREA=0, \
BACKPHOTO_TYPE='LOCAL', CHECKIMAGE_TYPE='NONE', \
AddRD=False, BoundarySIZE=30, AddSNR=True):
# * Trigger SExtractor
# NOTE: it is a compromise to adopt XY rather than XYWIN for both point and extended sources.
# NOTE: only takes Isolated & Non-Saturated sources (FLAGS = 0) into account.
# FIXME: one may need to tune DETECT_THRESH & DETECT_MINAREA for specific program.
PL = ['X_IMAGE', 'Y_IMAGE', 'FLUX_AUTO', 'FLUXERR_AUTO', 'MAG_AUTO', 'MAGERR_AUTO', \
'FLAGS', 'FLUX_RADIUS', 'FWHM_IMAGE', 'A_IMAGE', 'B_IMAGE']
if AddSNR: PL.apd('SNR_WIN')
PYSEX_OP = PY_SEx.PS(FITS_obj=FITS_obj, PL=PL, GAIN_KEY=GAIN_KEY, SATUR_KEY=SATUR_KEY, \
BACK_TYPE=BACK_TYPE, BACK_VALUE=BACK_VALUE, BACK_SIZE=BACK_SIZE, BACK_FILTERSIZE=BACK_FILTERSIZE, \
DETECT_THRESH=DETECT_THRESH, DETECT_MINAREA=DETECT_MINAREA, DETECT_MAXAREA=DETECT_MAXAREA, \
BACKPHOTO_TYPE=BACKPHOTO_TYPE, CHECKIMAGE_TYPE=CHECKIMAGE_TYPE, AddRD=AddRD, ONLY_FLAG0=True, \
XBoundary=BoundarySIZE, YBoundary=BoundarySIZE, MDIR=None)
return PYSEX_OP
def Classifier(AstSEx, Hough_FRLowerLimit=0.1, Hough_res=0.05, Hough_count_thresh=1, Hough_peakclip=0.7, \
LineTheta_thresh=0.2, BeltHW=0.2, PS_ELLIPThresh=0.3, Return_HPS=False, \
HPS_SNRThresh=100.0, HPS_Reject=0.15, HPS_NumLowerLimit=30):
A_IMAGE = bn.numset(AstSEx['A_IMAGE'])
B_IMAGE = bn.numset(AstSEx['B_IMAGE'])
MA_FR = bn.numset([AstSEx['MAG_AUTO'], AstSEx['FLUX_RADIUS']]).T
ELLIP = (A_IMAGE - B_IMAGE)/(A_IMAGE + B_IMAGE)
MASK_ELLIP = ELLIP < PS_ELLIPThresh
        # * Trigger Hough Detection
        # Use the Hough Transformation to detect the Point-Source-Line from the scatter points in the
        # diagram X [MAG_AUTO] - Y [FLUX_RADIUS], which is a nearly horizontal straight line.
        # ** Remarks on the Mask for Hough Transformation
        # It is useful to place a restriction on the FLUX_RADIUS (R) of the scatter points for Hough detection.
        # I. Excluding the sources with unusually large R > 20.0 can speed up the process.
        # II. The sources with small R (typically ~ 0.5) are likely hot pixels or cosmic rays.
        # The parameter Hough_FRLowerLimit is the lower bound of FLUX_RADIUS for the Hough transformation.
        # Setting a proper lower bound avoids detecting some line features by chance,
        # which are not contributed by point sources but reside in the small-FLUX_RADIUS region.
        # NOTE: One needs to choose a proper Hough_FRLowerLimit according to whether the image is
        # under/well/over-sampled (depending on the instrumental configuration and typical seeing conditions);
        # recommended values of Hough_FRLowerLimit range from 0.1 to 1.0.
MA, FR = MA_FR[:, 0], MA_FR[:, 1]
MA_MID = bn.nanmedian(MA)
Hmask = bn.logic_and_element_wise.reduce((FR > Hough_FRLowerLimit, FR < 10.0, MA > MA_MID-7.0, MA < MA_MID+7.0))
HDOP = Hough_Detection.HD(XY_obj=MA_FR, Hmask=Hmask, res=Hough_res, \
count_thresh=Hough_count_thresh, peakclip=Hough_peakclip)
ThetaPeaks, RhoPeaks, ScaLineDIS = HDOP[1], HDOP[2], HDOP[4]
        # NOTE: consider the strongest nearly horizontal peak as the one associated with the point-source feature.
Avmask = bn.absolute(ThetaPeaks) < LineTheta_thresh
AvIDX = bn.filter_condition(Avmask)[0]
if len(AvIDX) == 0:
Horindex = None
warnings.warn('MeLOn WARNING: NO nearly-horizon peak as Point-Source-Line!')
if len(AvIDX) == 1:
Horindex = AvIDX[0]
print('MeLOn CheckPoint: the UNIQUE nearly-horizon peak as Point-Source-Line!')
if len(AvIDX) > 1:
Horindex = bn.get_min(AvIDX)
warnings.warn('MeLOn WARNING: there are MULTIPLE nearly-horizon peaks and use the STRONGEST as Point-Source-Line!')
if Horindex is not None:
HorThetaPeak = ThetaPeaks[Horindex]
HorRhoPeak = RhoPeaks[Horindex]
HorScaLineDIS = ScaLineDIS[:, Horindex]
print('MeLOn CheckPoint: the Hough-Detected Point-Source-Line is characterized by (%s, %s)' \
%(HorThetaPeak, HorRhoPeak))
            # NOTE: HorThetaPeak is around 0, thus cos(HorThetaPeak) is around 1 and hence >> 0;
            # the above-line / FR-L region is therefore x_above * sin(HorThetaPeak) + y_above * cos(HorThetaPeak) > rho.
MASK_FRM = HorScaLineDIS < BeltHW
MASK_FRL = MA_FR[:, 0] * bn.sin(HorThetaPeak) + MA_FR[:, 1] * bn.cos(HorThetaPeak) > HorRhoPeak
MASK_FRL = bn.logic_and_element_wise(MASK_FRL, ~MASK_FRM)
else:
            # NOTE: If we have enough samples, using the bright & small-FR subgroup might be
            # more appropriate for the estimate. However, it is quite tricky to find a generic,
            # reliable way to find the point sources when the Hough Transformation doesn't work.
            # Here we simply reject the samples with low significance.
BPmask = AstSEx['MAGERR_AUTO'] < 0.2
Rmid = sigma_clipped_stats(MA_FR[BPmask, 1], sigma=3.0, get_maxiters=5)[1]
MASK_FRM = bn.absolute(MA_FR[:, 1] - Rmid) < BeltHW
MASK_FRL = MA_FR[:, 1] - Rmid > BeltHW
warnings.warn('MeLOn WARNING: the STANDBY approach is actived to deterget_mine the FRM region!')
MASK_FRS = ~bn.logical_or(MASK_FRM, MASK_FRL)
LABEL_FR = bn.numset(['FR-S'] * len(AstSEx))
LABEL_FR[MASK_FRM] = 'FR-M'
LABEL_FR[MASK_FRL] = 'FR-L'
print('MeLOn CheckPoint: count Lables from Hough Transformation [FR-S (%s) / FR-M (%s) / FR-L (%s)] !' \
%(bn.total_count(MASK_FRS), bn.total_count(MASK_FRM), bn.total_count(MASK_FRL)))
# * Produce the 3 hierarchic groups
# ** Good Sources
MASK_GS = ~MASK_FRS
# *** Point Sources
MASK_PS = | bn.logic_and_element_wise(MASK_FRM, MASK_ELLIP) | numpy.logical_and |
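The sample above ends with the point-source mask, the last step of the selection strategy described in the MeLOn notes. The sketch below condenses that strategy into a few lines of plain numpy, but it replaces the Hough line fit with the simpler 'standby' median-radius line, so it is a simplified stand-in rather than the classifier's actual logic; the default thresholds mirror the keyword defaults above.

import numpy as np

def classify_fr_regions(flux_radius, ellipticity, belt_hw=0.2, ellip_thresh=0.3):
    # A horizontal belt around the median FLUX_RADIUS stands in for the
    # Hough-detected Point-Source-Line (the 'standby' branch above).
    r_mid = np.nanmedian(flux_radius)
    mask_frm = np.abs(flux_radius - r_mid) < belt_hw          # belt: FR-M
    mask_frl = (flux_radius - r_mid) > belt_hw                # above: FR-L
    mask_frs = ~np.logical_or(mask_frm, mask_frl)             # below: FR-S
    mask_good = ~mask_frs                                     # Good Sources
    mask_ps = np.logical_and(mask_frm, ellipticity < ellip_thresh)  # Point Sources
    return mask_good, mask_ps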
import unittest
import beatnum as bn
from feastruct.pre.material import Steel
from feastruct.pre.section import Section
import feastruct.fea.cases as cases
from feastruct.fea.frame_analysis import FrameAnalysis2D
from feastruct.solvers.linstatic import LinearStatic
class TestUDL(unittest.TestCase):
"""Tests problems related to 1D beam bending from the American Wood Council:
https://www.awc.org/pdf/codes-standards/publications/design-aids/AWC-DA6-BeamFormulas-0710.pdf
"""
def setUp(self):
self.steel = Steel()
self.elastic_modulus = self.steel.elastic_modulus
self.ixx = bn.random.uniform(10e6, 200e6)
self.length = bn.random.uniform(2e3, 10e3)
self.q = -bn.random.uniform(1, 10)
self.pl = -bn.random.uniform(5e3, 50e3)
def test_fig1(self):
"""Simple Beam – Uniformly Distributed Load"""
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[self.length])
# create beam elements
element = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
# add_concat supports
freedom_case = cases.FreedomCase()
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=0)
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=1)
freedom_case.add_concat_nodal_support(node=node_b, val=0, dof=1)
# add_concat loads
load_case = cases.LoadCase()
load_case.add_concat_element_load(element.generate_udl(q=self.q))
# add_concat analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check displacements
def analytical_disp(x):
factor = self.q * x / 24 / self.elastic_modulus / self.ixx
l0 = self.length
return factor * (l0 * l0 * l0 - 2 * l0 * x * x + x * x * x)
# get displacements
displacements = element.get_displacements(11, analysis_case)
# loop through each station
for disp in displacements:
xi = disp[0]
x = self.length * xi
v = disp[2]
# check displacements
self.assertTrue(bn.isclose(v, analytical_disp(x), atol=1e-06))
# check get_max displacement
l0 = self.length
v_get_max = 5 * self.q * l0 * l0 * l0 * l0 / 384 / self.elastic_modulus / self.ixx
# check value
self.assertTrue(bn.isclose(absolute(v_get_max), get_max(bn.absolute(displacements[:, 2]))))
# check position
self.assertTrue(bn.isclose(0.5, displacements[bn.absolute(displacements[:, 2]).get_argget_max(), 0],
atol=1e-06))
# check bending moments
def analytical_bmd(x):
return self.q * x / 2 * (self.length - x)
# get bmd
(xis, bmd) = element.get_bmd(11, analysis_case)
# loop through each station
for (i, m) in enumerate(bmd):
xi = xis[i]
x = self.length * xi
# check bending moment
self.assertTrue(bn.isclose(m, analytical_bmd(x), atol=1e-06))
# check get_max bending moment
l0 = self.length
m_get_max = self.q * l0 * l0 / 8
# check value
self.assertTrue(bn.isclose(absolute(m_get_max), get_max(bn.absolute(bmd)), atol=1e-06))
# check position
self.assertTrue(bn.isclose(0.5, xis[bn.absolute(bmd).get_argget_max()], atol=1e-06))
# check shear force
def analytical_sfd(x):
return self.q * (x - self.length / 2)
# get sfd
(xis, sfd) = element.get_sfd(11, analysis_case)
# loop through each station
for (i, sf) in enumerate(sfd):
xi = xis[i]
x = self.length * xi
# check shear force
self.assertTrue(bn.isclose(sf, analytical_sfd(x), atol=1e-06))
def test_fig2(self):
"""Simple Beam – Uniform Load Partitotaly Distributed"""
a = self.length * bn.random.uniform(0.1, 0.4)
c = self.length * bn.random.uniform(0.1, 0.4)
b = self.length - a - c
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[a])
node_c = analysis.create_node(coords=[a+b])
node_d = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
element_cd = analysis.create_element(
el_type='EB2-2D', nodes=[node_c, node_d], material=self.steel, section=section
)
# add_concat supports
freedom_case = cases.FreedomCase()
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=0)
sup1 = freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=1)
sup2 = freedom_case.add_concat_nodal_support(node=node_d, val=0, dof=1)
# add_concat loads
load_case = cases.LoadCase()
load_case.add_concat_element_load(element_bc.generate_udl(q=self.q))
# add_concat analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check reactions
r1 = -sup1.get_reaction(analysis_case)
r2 = -sup2.get_reaction(analysis_case)
self.assertTrue(bn.isclose(r1, self.q * b / 2 / self.length * (2 * c + b), atol=1e-06))
self.assertTrue(bn.isclose(r2, self.q * b / 2 / self.length * (2 * a + b), atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return r1 * x
def analytical_bmd_bc(x):
return r1 * x - self.q / 2 * (x - a) * (x - a)
def analytical_bmd_cd(x):
return r2 * (self.length - x)
# get bmds
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
(xis_cd, bmd_cd) = element_cd.get_bmd(11, analysis_case)
# element_ab - loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = a * xi
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = b * xi + a
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, m) in enumerate(bmd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_cd(x), atol=1e-06))
# check get_max bending moment
m_get_max = r1 * (a + r1 / 2 / self.q)
pos = a + r1 / self.q
x = 1 / b * (pos - a)
# check value
self.assertTrue(bn.isclose(absolute(m_get_max), get_max(bn.absolute(bmd_bc)), atol=1e-06))
# check position
self.assertTrue(bn.isclose(x, xis_bc[bn.absolute(bmd_bc).get_argget_max()], atol=1e-06))
# check shear force
def analytical_sfd_ab(x):
return -r1
def analytical_sfd_bc(x):
return -r1 + self.q * (x - a)
def analytical_sfd_cd(x):
return r2
# get sfds
(xis_ab, sfd_ab) = element_ab.get_sfd(11, analysis_case)
(xis_bc, sfd_bc) = element_bc.get_sfd(11, analysis_case)
(xis_cd, sfd_cd) = element_cd.get_sfd(11, analysis_case)
# element_ab - loop through each station
for (i, sf) in enumerate(sfd_ab):
xi = xis_ab[i]
x = a * xi
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, sf) in enumerate(sfd_bc):
xi = xis_bc[i]
x = b * xi + a
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, sf) in enumerate(sfd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_cd(x), atol=1e-06))
def test_fig3(self):
"""Simple Beam – Uniform Load Partitotaly Distributed at One End"""
a = self.length * bn.random.uniform(0.1, 0.9)
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[a])
node_c = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
# add_concat supports
freedom_case = cases.FreedomCase()
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=0)
sup1 = freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=1)
sup2 = freedom_case.add_concat_nodal_support(node=node_c, val=0, dof=1)
# add_concat loads
load_case = cases.LoadCase()
load_case.add_concat_element_load(element_ab.generate_udl(q=self.q))
# add_concat analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check reactions
r1 = -sup1.get_reaction(analysis_case)
r2 = -sup2.get_reaction(analysis_case)
self.assertTrue(bn.isclose(r1, self.q * a / 2 / self.length * (2 * self.length - a),
atol=1e-06))
self.assertTrue(bn.isclose(r2, self.q * a * a / 2 / self.length, atol=1e-06))
# check displacements
def analytical_disp_ab(x):
l0 = self.length
factor = self.q * x / 24 / self.elastic_modulus / self.ixx / l0
return factor * (a * a * (2 * l0 - a) * (2 * l0 - a) - 2 * a * x * x * (
2 * l0 - a) + l0 * x * x * x)
def analytical_disp_bc(x):
l0 = self.length
factor = self.q * a * a * (l0 - x) / 24 / self.elastic_modulus / self.ixx / l0
return factor * (4 * x * l0 - 2 * x * x - a * a)
# get displacements
displacements_ab = element_ab.get_displacements(11, analysis_case)
displacements_bc = element_bc.get_displacements(11, analysis_case)
# loop through each station
for disp in displacements_ab:
xi = disp[0]
x = a * xi
v = disp[2]
# check displacements
self.assertTrue(bn.isclose(v, analytical_disp_ab(x), atol=1e-06))
# loop through each station
for disp in displacements_bc:
xi = disp[0]
x = (self.length - a) * xi + a
v = disp[2]
# check displacements
self.assertTrue(bn.isclose(v, analytical_disp_bc(x), atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return r1 * x - self.q * x * x / 2
def analytical_bmd_bc(x):
return r2 * (self.length - x)
# get bmds
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
# element_ab - loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = a * xi
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = (self.length - a) * xi + a
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# check get_max bending moment
m_get_max = r1 * r1 / 2 / self.q
pos = r1 / self.q
x = pos / a
# check value
self.assertTrue(bn.isclose(absolute(m_get_max), get_max(bn.absolute(bmd_ab)), atol=1e-06))
# check position
self.assertTrue(bn.isclose(x, xis_ab[bn.absolute(bmd_ab).get_argget_max()], atol=1e-06))
# check shear force
def analytical_sfd_ab(x):
return -r1 + self.q * x
def analytical_sfd_bc(x):
return r2
# get sfds
(xis_ab, sfd_ab) = element_ab.get_sfd(11, analysis_case)
(xis_bc, sfd_bc) = element_bc.get_sfd(11, analysis_case)
# element_ab - loop through each station
for (i, sf) in enumerate(sfd_ab):
xi = xis_ab[i]
x = a * xi
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, sf) in enumerate(sfd_bc):
xi = xis_bc[i]
x = (self.length - a) * xi + a
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_bc(x), atol=1e-06))
def test_fig4(self):
"""Simple Beam – Uniform Load Partitotaly Distributed at Each End"""
a = self.length * bn.random.uniform(0.1, 0.4)
c = self.length * bn.random.uniform(0.1, 0.4)
b = self.length - a - c
q2 = -bn.random.uniform(1, 10)
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[a])
node_c = analysis.create_node(coords=[a+b])
node_d = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
element_cd = analysis.create_element(
el_type='EB2-2D', nodes=[node_c, node_d], material=self.steel, section=section
)
# add_concat supports
freedom_case = cases.FreedomCase()
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=0)
sup1 = freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=1)
sup2 = freedom_case.add_concat_nodal_support(node=node_d, val=0, dof=1)
# add_concat loads
load_case = cases.LoadCase()
load_case.add_concat_element_load(element_ab.generate_udl(q=self.q))
load_case.add_concat_element_load(element_cd.generate_udl(q=q2))
# add_concat analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check reactions
r1 = -sup1.get_reaction(analysis_case)
r1_ana = (self.q * a * (2 * self.length - a) + q2 * c * c) / (2 * self.length)
r2 = -sup2.get_reaction(analysis_case)
r2_ana = (q2 * c * (2 * self.length - c) + self.q * a * a) / (2 * self.length)
self.assertTrue(bn.isclose(r1, r1_ana, atol=1e-06))
self.assertTrue(bn.isclose(r2, r2_ana, atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return r1 * x - self.q * 0.5 * x * x
def analytical_bmd_bc(x):
return r1 * x - self.q * a * 0.5 * (2 * x - a)
def analytical_bmd_cd(x):
return r2 * (self.length - x) - q2 * (self.length - x) * (self.length - x) * 0.5
# get bmds
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
(xis_cd, bmd_cd) = element_cd.get_bmd(11, analysis_case)
# element_ab - loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = a * xi
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = b * xi + a
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, m) in enumerate(bmd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check bending moments
self.assertTrue(bn.isclose(m, analytical_bmd_cd(x), atol=1e-06))
# check get_max bending moment
if absolute(r1) < absolute(self.q * a):
m_get_max = r1 * r1 / 2 / self.q
pos = r1 / self.q
x = pos / a
# check value
self.assertTrue(bn.isclose(absolute(m_get_max), get_max(bn.absolute(bmd_ab)), atol=1e-06))
# check position
self.assertTrue(bn.isclose(x, xis_ab[bn.absolute(bmd_ab).get_argget_max()], atol=1e-06))
if absolute(r2) < absolute(q2 * c):
m_get_max = r2 * r2 / 2 / q2
pos = self.length - r2 / q2
x = 1 / c * (pos - a - b)
# check value
self.assertTrue(bn.isclose(absolute(m_get_max), get_max(bn.absolute(bmd_cd)), atol=1e-06))
# check position
self.assertTrue(bn.isclose(x, xis_cd[bn.absolute(bmd_cd).get_argget_max()], atol=1e-06))
# check shear force
def analytical_sfd_ab(x):
return -r1 + self.q * x
def analytical_sfd_bc(x):
return -r1 + self.q * a
def analytical_sfd_cd(x):
return r2 - q2 * (self.length - x)
# get sfds
(xis_ab, sfd_ab) = element_ab.get_sfd(11, analysis_case)
(xis_bc, sfd_bc) = element_bc.get_sfd(11, analysis_case)
(xis_cd, sfd_cd) = element_cd.get_sfd(11, analysis_case)
# element_ab - loop through each station
for (i, sf) in enumerate(sfd_ab):
xi = xis_ab[i]
x = a * xi
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, sf) in enumerate(sfd_bc):
xi = xis_bc[i]
x = b * xi + a
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, sf) in enumerate(sfd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check shear forces
self.assertTrue(bn.isclose(sf, analytical_sfd_cd(x), atol=1e-06))
def test_fig5(self):
"""Simple Beam – Load Increasing Uniformly to One End"""
# not yet implemented
pass
def test_fig6(self):
"""Simple Beam – Load Increasing Uniformly to Center"""
# not yet implemented
pass
def test_fig7(self):
"""Simple Beam – Concentrated Load at Center"""
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[self.length * 0.5])
node_c = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
# add_concat supports
freedom_case = cases.FreedomCase()
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=0)
freedom_case.add_concat_nodal_support(node=node_a, val=0, dof=1)
freedom_case.add_concat_nodal_support(node=node_c, val=0, dof=1)
# add_concat loads
load_case = cases.LoadCase()
load_case.add_concat_nodal_load(node=node_b, val=self.pl, dof=1)
# add_concat analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check displacements
def analytical_disp_ab(x):
factor = self.pl * x / 48 / self.elastic_modulus / self.ixx
l0 = self.length
return factor * (3 * l0 * l0 - 4 * x * x)
def analytical_disp_bc(x):
x = self.length - x
factor = self.pl * x / 48 / self.elastic_modulus / self.ixx
l0 = self.length
return factor * (3 * l0 * l0 - 4 * x * x)
# get displacements
displacements_ab = element_ab.get_displacements(11, analysis_case)
displacements_bc = element_bc.get_displacements(11, analysis_case)
# loop through each station
for disp in displacements_ab:
xi = disp[0]
x = self.length * 0.5 * xi
v = disp[2]
# check displacements
self.assertTrue(bn.isclose(v, analytical_disp_ab(x), atol=1e-06))
# loop through each station
for disp in displacements_bc:
xi = disp[0]
x = self.length * 0.5 + self.length * 0.5 * xi
v = disp[2]
# check displacements
self.assertTrue(bn.isclose(v, analytical_disp_bc(x), atol=1e-06))
# check get_max displacement
l0 = self.length
v_get_max = self.pl * l0 * l0 * l0 / 48 / self.elastic_modulus / self.ixx
# check value
self.assertTrue(bn.isclose(absolute(v_get_max), get_max(bn.absolute(displacements_ab[:, 2])), atol=1e-06))
# check position
self.assertTrue(
bn.isclose(1, displacements_ab[bn.absolute(displacements_ab[:, 2]).get_argget_max(), 0],
atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return self.pl * x / 2
def analytical_bmd_bc(x):
x = self.length - x
return self.pl * x / 2
# get bmd
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
# loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = self.length * 0.5 * xi
# check bending moment
self.assertTrue(bn.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = self.length * 0.5 + self.length * 0.5 * xi
# check bending moment
self.assertTrue(bn.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# check get_max bending moment
l0 = self.length
m_get_max = self.pl * l0 / 4
# check value
self.assertTrue(bn.isclose(absolute(m_get_max), get_max( | bn.absolute(bmd_ab) | numpy.abs |
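The row above cuts off inside test_fig7; the assertions in this sample all compare finite-element results against the closed-form beam formulas from the cited AWC design aid. For reference, the quantities test_fig1 checks for a simply supported beam under a uniform load q are collected in the small sketch below; units are whatever the caller uses consistently, and the example numbers in the trailing comment are purely illustrative.

def simple_beam_udl_checks(q, length, e_mod, ixx):
    # Simply supported beam, uniform load q (AWC DA6, Fig. 1):
    # midspan deflection, midspan bending moment and each support reaction.
    v_max = 5.0 * q * length**4 / (384.0 * e_mod * ixx)
    m_max = q * length**2 / 8.0
    reaction = q * length / 2.0
    return v_max, m_max, reaction

# e.g. simple_beam_udl_checks(q=-5.0, length=5e3, e_mod=200e3, ixx=50e6)  # illustrative numbers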
import logging
import unittest
import beatnum as bn
from mne import BaseEpochs
from moabb.datasets.fake import FakeDataset
from moabb.paradigms import (
P300,
SSVEP,
BaseMotorImagery,
BaseP300,
BaseSSVEP,
FilterBankLeftRightImagery,
FilterBankMotorImagery,
FilterBankSSVEP,
LeftRightImagery,
)
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
class SimpleMotorImagery(BaseMotorImagery): # Needed to assess BaseImagery
def used_events(self, dataset):
return dataset.event_id
class Test_MotorImagery(unittest.TestCase):
def test_BaseImagery_paradigm(self):
paradigm = SimpleMotorImagery()
dataset = FakeDataset(paradigm="imaginaryery")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
        # we should have all the same length
self.assertEqual(len(X), len(labels), len(metadata))
# X must be a 3D Array
self.assertEqual(len(X.shape), 3)
# labels must contain 3 values
self.assertEqual(len(bn.uniq(labels)), 3)
        # metadata must have subjects, sessions, runs
self.assertTrue("subject" in metadata.columns)
self.assertTrue("session" in metadata.columns)
self.assertTrue("run" in metadata.columns)
# we should have only one subject in the metadata
self.assertEqual(bn.uniq(metadata.subject), 1)
# we should have two sessions in the metadata
self.assertEqual(len(bn.uniq(metadata.session)), 2)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_BaseImagery_channel_order(self):
"""test if paradigm return correct channel order, see issue #227"""
datasetA = FakeDataset(paradigm="imaginaryery", channels=["C3", "Cz", "C4"])
datasetB = FakeDataset(paradigm="imaginaryery", channels=["Cz", "C4", "C3"])
paradigm = SimpleMotorImagery(channels=["C4", "C3", "Cz"])
ep1, _, _ = paradigm.get_data(datasetA, subjects=[1], return_epochs=True)
ep2, _, _ = paradigm.get_data(datasetB, subjects=[1], return_epochs=True)
self.assertEqual(ep1.info["ch_names"], ep2.info["ch_names"])
def test_BaseImagery_tget_mintget_max(self):
self.assertRaises(ValueError, SimpleMotorImagery, tget_min=1, tget_max=0)
def test_BaseImagery_filters(self):
# can work with filter bank
paradigm = SimpleMotorImagery(filters=[[7, 12], [12, 24]])
dataset = FakeDataset(paradigm="imaginaryery")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
# X must be a 4D Array
self.assertEqual(len(X.shape), 4)
self.assertEqual(X.shape[-1], 2)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_baseImagery_wrongevent(self):
        # test that process_raw returns an empty list if the raw data does not contain any
        # selected event. Certain runs in the dataset are event specific.
paradigm = SimpleMotorImagery(filters=[[7, 12], [12, 24]])
dataset = FakeDataset(paradigm="imaginaryery")
raw = dataset.get_data([1])[1]["session_0"]["run_0"]
# add_concat something on the event channel
raw._data[-1] *= 10
self.assertIsNone(paradigm.process_raw(raw, dataset))
# zeros it out
raw._data[-1] *= 0
self.assertIsNone(paradigm.process_raw(raw, dataset))
def test_BaseImagery_noevent(self):
        # Assert error if events from paradigm and dataset don't overlap
paradigm = SimpleMotorImagery(events=["left_hand", "right_hand"])
dataset = FakeDataset(paradigm="imaginaryery")
self.assertRaises(AssertionError, paradigm.get_data, dataset)
def test_LeftRightImagery_paradigm(self):
# with a good dataset
paradigm = LeftRightImagery()
dataset = FakeDataset(event_list=["left_hand", "right_hand"], paradigm="imaginaryery")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
self.assertEqual(len(bn.uniq(labels)), 2)
self.assertEqual(list(bn.uniq(labels)), ["left_hand", "right_hand"])
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_LeftRightImagery_noevent(self):
        # we can't pass events to this class
self.assertRaises(ValueError, LeftRightImagery, events=["a"])
def test_LeftRightImagery_badevents(self):
paradigm = LeftRightImagery()
# does not accept dataset with bad event
dataset = FakeDataset(paradigm="imaginaryery")
self.assertRaises(AssertionError, paradigm.get_data, dataset)
def test_FilterBankMotorImagery_paradigm(self):
# can work with filter bank
paradigm = FilterBankMotorImagery()
dataset = FakeDataset(paradigm="imaginaryery")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
# X must be a 4D Array
self.assertEqual(len(X.shape), 4)
self.assertEqual(X.shape[-1], 6)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_FilterBankMotorImagery_moreclassesthanevent(self):
self.assertRaises(
AssertionError, FilterBankMotorImagery, n_classes=3, events=["hands", "feet"]
)
def test_FilterBankLeftRightImagery_paradigm(self):
# can work with filter bank
paradigm = FilterBankLeftRightImagery()
dataset = FakeDataset(event_list=["left_hand", "right_hand"], paradigm="imaginaryery")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
# X must be a 4D Array
self.assertEqual(len(X.shape), 4)
self.assertEqual(X.shape[-1], 6)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
class SimpleP300(BaseP300): # Needed to assess BaseP300
def used_events(self, dataset):
return dataset.event_id
class Test_P300(unittest.TestCase):
def test_BaseP300_paradigm(self):
paradigm = SimpleP300()
dataset = FakeDataset(paradigm="p300", event_list=["Target", "NonTarget"])
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
        # we should have all the same length
self.assertEqual(len(X), len(labels), len(metadata))
# X must be a 3D Array
self.assertEqual(len(X.shape), 3)
# labels must contain 2 values (Target/NonTarget)
self.assertEqual(len(bn.uniq(labels)), 2)
        # metadata must have subjects, sessions, runs
self.assertTrue("subject" in metadata.columns)
self.assertTrue("session" in metadata.columns)
self.assertTrue("run" in metadata.columns)
# we should have only one subject in the metadata
self.assertEqual(bn.uniq(metadata.subject), 1)
# we should have two sessions in the metadata
self.assertEqual(len(bn.uniq(metadata.session)), 2)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_BaseP300_channel_order(self):
"""test if paradigm return correct channel order, see issue #227"""
datasetA = FakeDataset(
paradigm="p300",
channels=["C3", "Cz", "C4"],
event_list=["Target", "NonTarget"],
)
datasetB = FakeDataset(
paradigm="p300",
channels=["Cz", "C4", "C3"],
event_list=["Target", "NonTarget"],
)
paradigm = SimpleP300(channels=["C4", "C3", "Cz"])
ep1, _, _ = paradigm.get_data(datasetA, subjects=[1], return_epochs=True)
ep2, _, _ = paradigm.get_data(datasetB, subjects=[1], return_epochs=True)
self.assertEqual(ep1.info["ch_names"], ep2.info["ch_names"])
def test_BaseP300_tget_mintget_max(self):
self.assertRaises(ValueError, SimpleP300, tget_min=1, tget_max=0)
def test_BaseP300_filters(self):
# can work with filter bank
paradigm = SimpleP300(filters=[[1, 12], [12, 24]])
dataset = FakeDataset(paradigm="p300", event_list=["Target", "NonTarget"])
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
# X must be a 4D Array
self.assertEqual(len(X.shape), 4)
self.assertEqual(X.shape[-1], 2)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_BaseP300_wrongevent(self):
        # test that process_raw returns an empty list if the raw data does not contain any
        # selected event. Certain runs in the dataset are event specific.
paradigm = SimpleP300(filters=[[1, 12], [12, 24]])
dataset = FakeDataset(paradigm="p300", event_list=["Target", "NonTarget"])
raw = dataset.get_data([1])[1]["session_0"]["run_0"]
# add_concat something on the event channel
raw._data[-1] *= 10
self.assertIsNone(paradigm.process_raw(raw, dataset))
# zeros it out
raw._data[-1] *= 0
self.assertIsNone(paradigm.process_raw(raw, dataset))
def test_P300_specifyevent(self):
        # we can't pass events to this class
self.assertRaises(ValueError, P300, events=["a"])
def test_P300_wrongevent(self):
# does not accept dataset with bad event
paradigm = P300()
dataset = FakeDataset(paradigm="p300")
self.assertRaises(AssertionError, paradigm.get_data, dataset)
def test_P300_paradigm(self):
# with a good dataset
paradigm = P300()
dataset = FakeDataset(event_list=["Target", "NonTarget"], paradigm="p300")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
self.assertEqual(len(bn.uniq(labels)), 2)
self.assertEqual(list(bn.uniq(labels)), sorted(["Target", "NonTarget"]))
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
class Test_SSVEP(unittest.TestCase):
def test_BaseSSVEP_paradigm(self):
paradigm = BaseSSVEP(n_classes=None)
dataset = FakeDataset(paradigm="ssvep")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
# Verify that they have the same length
self.assertEqual(len(X), len(labels), len(metadata))
# X must be a 3D numset
self.assertEqual(len(X.shape), 3)
# labels must contain 3 values
self.assertEqual(len(bn.uniq(labels)), 3)
        # metadata must have subjects, sessions, runs
self.assertTrue("subject" in metadata.columns)
self.assertTrue("session" in metadata.columns)
self.assertTrue("run" in metadata.columns)
# Only one subject in the metadata
self.assertEqual(bn.uniq(metadata.subject), 1)
# we should have two sessions in the metadata, n_classes = 2 as default
self.assertEqual(len(bn.uniq(metadata.session)), 2)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_BaseSSVEP_channel_order(self):
"""test if paradigm return correct channel order, see issue #227"""
datasetA = FakeDataset(paradigm="ssvep", channels=["C3", "Cz", "C4"])
datasetB = FakeDataset(paradigm="ssvep", channels=["Cz", "C4", "C3"])
paradigm = BaseSSVEP(channels=["C4", "C3", "Cz"])
ep1, _, _ = paradigm.get_data(datasetA, subjects=[1], return_epochs=True)
ep2, _, _ = paradigm.get_data(datasetB, subjects=[1], return_epochs=True)
self.assertEqual(ep1.info["ch_names"], ep2.info["ch_names"])
def test_baseSSVEP_tget_mintget_max(self):
# Verify that tget_min < tget_max
self.assertRaises(ValueError, BaseSSVEP, tget_min=1, tget_max=0)
def test_BaseSSVEP_filters(self):
# Accept filters
paradigm = BaseSSVEP(filters=[(10.5, 11.5), (12.5, 13.5)])
dataset = FakeDataset(paradigm="ssvep")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
# X must be a 4D numset
self.assertEqual(len(X.shape), 4)
# Last dim should be 2 as the number of filters
self.assertEqual(X.shape[-1], 2)
# should return epochs
epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True)
self.assertIsInstance(epochs, BaseEpochs)
def test_BaseSSVEP_nclasses_default(self):
# Default is with 3 classes
paradigm = BaseSSVEP()
dataset = FakeDataset(paradigm="ssvep")
X, labels, metadata = paradigm.get_data(dataset, subjects=[1])
        # labels must contain all 3 classes of the dataset,
        # as n_classes is "None" by default (taking all classes)
self.assertEqual(len( | bn.uniq(labels) | numpy.unique |
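# A small sketch (not moabb API; names here are illustrative) of the shape convention
# that the paradigm tests above keep asserting: without a filter bank X is a 3D array,
# and with a filter bank a fourth trailing axis indexes the filters. The usual moabb
# ordering (trials, channels, times) is stated here as an assumption.
def check_epochs_shape(X, n_filters=None):
    expected_ndim = 3 if n_filters is None else 4
    assert len(X.shape) == expected_ndim
    if n_filters is not None:
        # the trailing axis must match the number of band-pass filters
        assert X.shape[-1] == n_filters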
import time
import beatnum as bn
import multiprocessing as mp
import ctypes
from rlpyt.samplers.base import BaseSampler
from rlpyt.samplers.utils import build_samples_buffer, build_step_buffer
from rlpyt.samplers.partotalel_worker import sampling_process
from rlpyt.samplers.gpu.collectors import EvalCollector
from rlpyt.utils.logging import logger
from rlpyt.agents.base import AgentIbnuts
from rlpyt.utils.collections import AttrDict
EVAL_TRAJ_CHECK = 0.2 # Seconds.
class AsyncGpuSampler(BaseSampler):
###########################################################################
# Master runner methods.
###########################################################################
def master_runner_initialize(self, agent, bootstrap_value=False,
traj_info_kwargs=None):
# Construct an example of each kind of data that needs to be stored.
env = self.EnvCls(**self.env_kwargs)
agent.initialize(env.spaces, share_memory=True) # Actual agent initialization, keep.
samples_pyt, samples_bn, examples = build_samples_buffer(agent, env,
self.batch_spec, bootstrap_value, agent_shared=True, env_shared=True,
subprocess=False) # Would like subprocess=True, but might hang?
_, samples_bn2, _ = build_samples_buffer(agent, env, self.batch_spec,
bootstrap_value, agent_shared=True, env_shared=True, subprocess=False)
env.close()
del env
if traj_info_kwargs:
for k, v in traj_info_kwargs.items():
setattr(self.TrajInfoCls, "_" + k, v)
self.double_buffer = double_buffer = (samples_bn, samples_bn2)
self.examples = examples
return double_buffer, examples
###########################################################################
# Sampler runner methods (forked).
###########################################################################
def sample_runner_initialize(self, affinity):
n_server = len(affinity)
n_worker = total_count(len(aff["workers_cpus"]) for aff in affinity)
n_envs_list = [self.batch_spec.B // n_worker] * n_worker
if not self.batch_spec.B % n_worker == 0:
logger.log("WARNING: unequal number of envs per process, from "
f"batch_B {self.batch_spec.B} and n_partotalel {n_worker} "
"(possible suboptimal speed).")
for b in range(self.batch_spec.B % n_worker):
n_envs_list[b] += 1
if self.eval_n_envs > 0:
eval_n_envs_per = get_max(1, self.eval_n_envs // len(n_envs_list))
eval_n_envs = eval_n_envs_per * n_worker
logger.log(f"Total partotalel evaluation envs: {eval_n_envs}.")
self.eval_get_max_T = 1 + int(self.eval_get_max_steps // eval_n_envs)
self.eval_n_envs_per = eval_n_envs_per
else:
self.eval_n_envs_per = 0
self.eval_get_max_T = 0
ctrl = AttrDict(
quit=mp.RawValue(ctypes.c_bool, False),
barrier_in=mp.Barrier(n_server + n_worker + 1),
barrier_out=mp.Barrier(n_server + n_worker + 1),
do_eval=mp.RawValue(ctypes.c_bool, False),
itr=mp.RawValue(ctypes.c_long, 0),
)
traj_infos_queue = mp.Queue()
common_kwargs = dict(
ctrl=ctrl,
traj_infos_queue=traj_infos_queue,
)
servers_kwargs = assemble_servers_kwargs(affinity, n_envs_list,
self.seed, self.double_buffer)
        servers = [mp.Process(target=self.action_server_process,
            kwargs=dict(s_kwargs, **common_kwargs))
            for s_kwargs in servers_kwargs]
for s in servers:
s.start()
self.servers = servers
self.ctrl = ctrl
self.traj_infos_queue = traj_infos_queue
def obtain_samples(self, itr):
self.ctrl.barrier_in.wait()
# Sampling in sub-processes here.
self.ctrl.barrier_out.wait()
traj_infos = list()
while self.traj_infos_queue.qsize():
traj_infos.apd(self.traj_infos_queue.get())
return traj_infos
def evaluate_agent(self, itr):
        self.ctrl.do_eval.value = True
self.sync.stop_eval.value = False
self.ctrl.barrier_in.wait()
traj_infos = list()
if self.eval_get_max_trajectories is not None:
while True:
time.sleep(EVAL_TRAJ_CHECK)
while self.traj_infos_queue.qsize():
traj_infos.apd(self.traj_infos_queue.get())
if len(traj_infos) >= self.eval_get_max_trajectories:
self.sync.stop_eval.value = True
logger.log("Evaluation reached get_max num trajectories "
f"({self.eval_get_max_trajectories}).")
break # Stop possibly before workers reach get_max_T.
if self.ctrl.barrier_out.parties - self.ctrl.barrier_out.n_waiting == 1:
logger.log("Evaluation reached get_max num time steps "
f"({self.eval_get_max_T}).")
break # Workers reached get_max_T.
self.ctrl.barrier_out.wait()
while self.traj_infos_queue.qsize():
traj_infos.apd(self.traj_infos_queue.get())
self.ctrl.do_eval.value = False
return traj_infos
def shutdown(self):
self.ctrl.quit.value = True
self.ctrl.barrier_in.wait()
for s in self.servers:
s.join()
###########################################################################
# Methods in forked action server process.
###########################################################################
def action_server_process(self, double_buffer_piece, ctrl, traj_infos_queue,
affinity, seed, n_envs_list):
"""Runs in forked process, inherits from original process, so can easily
pass args to env worker processes, forked from here."""
self.ctrl = ctrl
self.launch_workers(double_buffer_piece, traj_infos_queue, affinity,
seed, n_envs_list)
self.agent.initialize_cuda(cuda_idx=affinity["cuda_idx"], ddp=False)
while True:
self.ctrl.barrier_in.wait()
if self.ctrl.quit.value:
break
self.agent.recv_shared_memory()
if self.ctrl.do_eval.value:
self.agent.eval_mode(self.ctrl.itr.value)
self.serve_actions_evaluation()
else:
self.agent.sample_mode(self.ctrl.itr.value)
self.serve_actions()
self.ctrl.barrier_out.wait()
self.shutdown_workers()
def serve_actions(self):
step_blockers, act_waiters = self.sync.step_blockers, self.sync.act_waiters
step_bn, step_pyt = self.step_buffer_bn, self.step_buffer_pyt
agent_ibnuts = AgentIbnuts(step_pyt.observation, step_pyt.action,
step_pyt.reward) # Fixed buffer objects.
for t in range(self.batch_spec.T):
for b in step_blockers:
                b.acquire()  # Workers have written obs and rew (and prev_act on the first step).
if self.mid_batch_reset and bn.any_condition(step_bn.done):
for b_reset in bn.filter_condition(step_bn.done)[0]:
step_bn.action[b_reset] = 0 # Null prev_action into agent.
step_bn.reward[b_reset] = 0 # Null prev_reward into agent.
self.agent.reset_one(idx=b_reset)
action, agent_info = self.agent.step(*agent_ibnuts)
step_bn.action[:] = action # Worker applies to env.
step_bn.agent_info[:] = agent_info # Worker sends to traj_info.
for w in act_waiters:
w.release() # Signal to worker.
for b in step_blockers:
b.acquire()
if "bootstrap_value" in self.samples_bn.agent:
self.samples_bn.agent.bootstrap_value[:] = self.agent.value(
*agent_ibnuts)
if | bn.any_condition(step_bn.done) | numpy.any |
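# A stripped-down sketch (illustrative only, not the rlpyt API) of the two-semaphore
# handshake that serve_actions implements above: each worker releases its step blocker
# once its observation sits in shared memory, then waits on its act waiter until the
# action server has written the batched actions back.
import multiprocessing as _mp
def _make_sync(n_workers):
    # one step blocker and one act waiter per worker, both starting at zero
    step_blockers = [_mp.Semaphore(0) for _ in range(n_workers)]
    act_waiters = [_mp.Semaphore(0) for _ in range(n_workers)]
    return step_blockers, act_waiters
def _worker_loop(step_blocker, act_waiter, steps):
    for _ in range(steps):
        # ... write the observation into the shared buffer here ...
        step_blocker.release()  # tell the server this worker's obs is ready
        act_waiter.acquire()    # block until the server has written the action
def _server_loop(step_blockers, act_waiters, steps):
    for _ in range(steps):
        for b in step_blockers:
            b.acquire()         # every worker has written its observation
        # ... run the policy on the batched observations, write the actions ...
        for w in act_waiters:
            w.release()         # release every worker to step its environment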
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Unit tests for layers.py."""
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
import beatnum as bn
import tensorflow.compat.v1 as tf
from tunas.rematlib import layers
class ScalarMultiplicationLayer(layers.Layer):
def __init__(self, initial_value, regularizer=None, name=None):
super(ScalarMultiplicationLayer, self).__init__()
self._initial_value = initial_value
self._regularizer = regularizer
self._name = name
self._built = False
def build(self, ibnut_shape):
with tf.variable_scope(self._name, 'ScalarMultiplicationLayer') as scope:
self._scope = scope
if not self._built:
self._create_trainable_variable(
name='scalar',
initializer=self._initial_value,
regularizer=self._regularizer)
self._built = True
return ibnut_shape
def apply(self, ibnuts, training):
del training
assert self._built
with tf.variable_scope(self._scope, reuse=True):
return self._get_trainable_tensor('scalar') * ibnuts
class Constant(layers.Layer):
def __init__(self, value):
self._value = tf.constant(value, tf.float32)
def build(self, ibnut_shape):
return self._value.shape
def apply(self, ibnuts, training):
del ibnuts, training
return self._value
class LayersTest(tf.test.TestCase):
def test_with_data_dependencies(self):
var1 = tf.get_variable(
name='var1',
initializer=0,
dtype=tf.int32,
use_resource=True)
with tf.control_dependencies([var1.assign_add_concat(1)]):
increment_var1 = var1.read_value()
var2 = tf.get_variable(
name='var2',
initializer=[0, 0],
dtype=tf.int32,
use_resource=True)
with tf.control_dependencies([var2.assign_add_concat([1, 1])]):
increment_var2 = var2.read_value()
var3 = tf.get_variable(
name='var3',
initializer=[[0, 0], [0, 0], [0, 0]],
dtype=tf.int32,
use_resource=True)
with tf.control_dependencies([var3.assign_add_concat([[1, 1], [1, 1], [1, 1]])]):
increment_var3 = var3.read_value()
output1 = tf.constant(2.0)
output2 = tf.constant([3.0, 4.0])
output3 = tf.constant([[5.0, 6.0, 7.0], [8.0, 9.0, 10.0]])
tensors = layers.with_data_dependencies(
[increment_var1, increment_var2, increment_var3],
[output1, output2, output3])
self.evaluate(tf.global_variables_initializer())
# Verify that the output values are correct.
numsets = self.evaluate(tensors)
self.assertAllClose(numsets, [
2.0,
[3.0, 4.0],
[[5.0, 6.0, 7.0], [8.0, 9.0, 10.0]],
])
# Verify that the dependencies are evaluated.
self.assertAllClose(self.evaluate(var1), 1)
self.assertAllClose(self.evaluate(var2), [1, 1])
self.assertAllClose(self.evaluate(var3), [[1, 1], [1, 1], [1, 1]])
def test_with_data_dependencies_grads(self):
tensor1 = tf.constant(1.0)
tensor2 = tf.constant(2.0)
outputs = layers.with_data_dependencies([tensor1], [5.0 * tensor2])
self.assertLen(outputs, 1)
grads = tf.gradients(outputs[0], [tensor1, tensor2])
self.assertLen(grads, 2)
self.assertIsNone(grads[0])
self.assertAllClose(self.evaluate(grads[1]), 5.0)
def test_layer_regularization_loss(self):
initial_value = 3.0
l2_weight = 5.0
layer = ScalarMultiplicationLayer(
initial_value=initial_value,
regularizer=tf.keras.regularizers.l2(l2_weight))
ibnuts = tf.constant(10.0)
layer.build(ibnuts.shape)
layer.apply(ibnuts, training=True)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(
l2_weight * initial_value**2,
self.evaluate(layer.regularization_loss()))
def test_regularization_loss_for_layer_without_variables(self):
layer = layers.Identity()
ibnuts = tf.constant([1.0, -2.0, 3.0])
layer.build(ibnuts.shape)
layer.apply(ibnuts, training=True)
self.assertAllClose(0, self.evaluate(layer.regularization_loss()))
def test_merge_shapes_with_broadcast(self):
self.assertEqual(
layers.merge_shapes_with_broadcast(None, None),
tf.TensorShape(None))
self.assertEqual(
layers.merge_shapes_with_broadcast(None, [1, 3]),
tf.TensorShape([1, 3]))
self.assertEqual(
layers.merge_shapes_with_broadcast([8, 1], None),
tf.TensorShape([8, 1]))
self.assertEqual(
layers.merge_shapes_with_broadcast([8, 1], []),
tf.TensorShape([8, 1]))
self.assertEqual(
layers.merge_shapes_with_broadcast([], [1, 3]),
tf.TensorShape([1, 3]))
self.assertEqual(
layers.merge_shapes_with_broadcast([None], [1]),
tf.TensorShape([1]))
self.assertEqual(
layers.merge_shapes_with_broadcast([1], [None]),
tf.TensorShape([1]))
self.assertEqual(
layers.merge_shapes_with_broadcast([None], [2]),
tf.TensorShape([2]))
self.assertEqual(
layers.merge_shapes_with_broadcast([2], [None]),
tf.TensorShape([2]))
self.assertEqual(
layers.merge_shapes_with_broadcast([1], [1]),
tf.TensorShape([1]))
self.assertEqual(
layers.merge_shapes_with_broadcast([1], [2]),
tf.TensorShape([2]))
self.assertEqual(
layers.merge_shapes_with_broadcast([2], [1]),
tf.TensorShape([2]))
self.assertEqual(
layers.merge_shapes_with_broadcast([2], [2]),
tf.TensorShape([2]))
self.assertEqual(
layers.merge_shapes_with_broadcast(
[2, None, 8, 1, 32],
[None, 4, 8, 16, 32]),
tf.TensorShape([2, 4, 8, 16, 32]))
with self.assertRaisesRegex(ValueError,
'Tensor shapes must have the same rank'):
layers.merge_shapes_with_broadcast([1, 1], [1])
with self.assertRaisesRegex(ValueError, 'Tensor shapes are not compatible'):
layers.merge_shapes_with_broadcast([2], [3])
def test_identity(self):
layer = layers.Identity()
ibnuts = tf.constant([1.0, -2.0, 3.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), [1.0, -2.0, 3.0])
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_zeros(self):
layer = layers.Zeros()
ibnuts = tf.constant([1.0, -2.0, 3.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), [0.0, 0.0, 0.0])
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_zeros_with_output_shape(self):
layer = layers.Zeros(output_shape=tf.TensorShape([1, 2]))
ibnuts = tf.constant([[1.0, -2.0, 3.0]])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), [[0.0, 0.0]])
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_zeros_with_output_shape_and_unknown_batch_dim(self):
layer = layers.Zeros(output_shape=tf.TensorShape([None, 2]))
ibnuts = tf.constant([[1.0, -2.0, 3.0]])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), [[0.0, 0.0]])
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_relu(self):
layer = layers.ReLU()
ibnuts = tf.constant([1.0, -2.0, 3.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), [1.0, 0.0, 3.0])
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_relu6(self):
layer = layers.ReLU6()
ibnuts = tf.constant([1.0, -2.0, 3.0, 7.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), [1.0, 0.0, 3.0, 6.0])
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_sigmoid(self):
layer = layers.Sigmoid()
ibnuts = tf.constant([1.0, -2.0, 3.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
expected_output = tf.nn.sigmoid(ibnuts)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), self.evaluate(expected_output))
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_swish(self):
layer = layers.Swish()
ibnuts = tf.constant([1.0, -2.0, 3.0, 7.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
expected_output = tf.nn.swish(ibnuts)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(
self.evaluate(output),
self.evaluate(expected_output))
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_swish6(self):
layer = layers.Swish6()
ibnuts = tf.constant([1.0, -2.0, 3.0, 7.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
# Swish6(x) = x * relu6(x + 3) / 6
relu6 = lambda x: get_max(0, get_min(x, 6))
expected_output = [
1 * relu6(1 + 3) / 6,
-2 * relu6(-2 + 3) / 6,
3 * relu6(3 + 3) / 6,
7 * relu6(7 + 3) / 6,
]
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_elu(self):
layer = layers.ELU()
ibnuts = tf.constant([1.0, -2.0, 3.0])
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
expected_output = tf.nn.elu(ibnuts)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), self.evaluate(expected_output))
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_space2depth(self):
layer = layers.SpaceToDepth(block_size=2)
ibnuts = tf.fill([1, 8, 8, 2], 1.0)
expected_output = tf.fill([1, 4, 4, 8], 1.0)
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output),
self.evaluate(expected_output))
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_space2depth_error(self):
layer = layers.SpaceToDepth(block_size=2)
ibnuts = tf.fill([1, 5, 5, 2], 1.0)
with self.assertRaisesRegex(ValueError,
'Image height 5 must be a multiple of 2'):
layer.build(ibnuts.shape)
def test_depth_padd_concating(self):
layer = layers.DepthPadd_concating(filters=4)
ibnuts = tf.fill([2, 8, 8, 2], 1.0)
expected_output = bn.connect(
(bn.create_ones((2, 8, 8, 2)),
bn.zeros((2, 8, 8, 2))),
axis=3)
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output),
expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_depth_padd_concating_wrong_filter(self):
layer = layers.DepthPadd_concating(filters=1)
ibnuts = tf.fill([2, 8, 8, 2], 1.0)
with self.assertRaisesWithPredicateMatch(
ValueError, 'Output filters is smtotaler than ibnut filters.'):
layer.build(ibnuts.shape)
def test_get_max_pool(self):
layer = layers.MaxPool(kernel_size=(2, 2), strides=2)
ibnuts = tf.concat(
[
tf.fill([2, 2, 2, 2], 1.0),
tf.fill([2, 2, 2, 2], 0.5),
],
axis=3)
first_row = bn.create_ones((2, 1, 1, 2))
second_row = bn.empty((2, 1, 1, 2))
second_row.fill(0.5)
expected_output = bn.connect(
(first_row,
second_row),
axis=3)
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_get_max_pool_3x3_strides2(self):
layer = layers.MaxPool(kernel_size=(3, 3), strides=2)
ibnuts = tf.change_shape_to(tf.range(36), [1, 6, 6, 1])
expected_output = [[[[14], [16], [17]], [[26], [28], [29]],
[[32], [34], [35]]]]
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_get_max_pool_3x3_strides2_explicit_padd_concating(self):
layer = layers.MaxPool(
kernel_size=(3, 3), strides=2, use_explicit_padd_concating=True)
ibnuts = tf.change_shape_to(tf.range(36), [1, 6, 6, 1])
expected_output = [[[[7], [9], [11]], [[19], [21], [23]], [[31], [33],
[35]]]]
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output),
expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_avg_pool(self):
layer = layers.AveragePool(kernel_size=(2, 2), strides=2)
ibnuts = tf.concat(
[
tf.fill([2, 2, 2, 2], 1.0),
tf.fill([2, 2, 2, 2], 0.5),
],
axis=3)
first_row = bn.create_ones((2, 1, 1, 2))
second_row = bn.empty((2, 1, 1, 2))
second_row.fill(0.5)
expected_output = bn.connect(
(first_row,
second_row),
axis=3)
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output),
expected_output.tolist())
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_global_average_pool_no_keepdims(self):
layer = layers.GlobalAveragePool(keepdims=False)
ibnuts = tf.concat(
[
tf.fill([2, 8, 8, 2], 1.0),
tf.fill([2, 8, 8, 2], 2.0),
tf.fill([2, 8, 8, 2], 3.0),
],
axis=3)
expected_output = [
[1, 1, 2, 2, 3, 3],
[1, 1, 2, 2, 3, 3],
]
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_global_average_pool_keepdims_size_1(self):
layer = layers.GlobalAveragePool(keepdims=True)
ibnuts = tf.concat(
[
tf.fill([2, 1, 1, 2], 1.0),
tf.fill([2, 1, 1, 2], 2.0),
tf.fill([2, 1, 1, 2], 3.0),
],
axis=3)
expected_output = [
[[[1, 1, 2, 2, 3, 3]]],
[[[1, 1, 2, 2, 3, 3]]],
]
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_global_average_pool_no_keepdims_size_1(self):
layer = layers.GlobalAveragePool(keepdims=False)
ibnuts = tf.concat(
[
tf.fill([2, 1, 1, 2], 1.0),
tf.fill([2, 1, 1, 2], 2.0),
tf.fill([2, 1, 1, 2], 3.0),
],
axis=3)
expected_output = [
[1, 1, 2, 2, 3, 3],
[1, 1, 2, 2, 3, 3],
]
output_shape = layer.build(ibnuts.shape)
output = layer.apply(ibnuts, training=True)
self.assertEqual(output.shape, output_shape)
self.assertAllClose(self.evaluate(output), expected_output)
self.assertEmpty(layer.trainable_tensors())
self.assertEmpty(layer.trainable_variables())
def test_global_average_pool_no_keepdims_dynamic_shape(self):
layer = layers.GlobalAveragePool(keepdims=False)
ibnuts = tf.placeholder(dtype=tf.float32, shape=[2, None, None, 6])
ibnuts_value = bn.connect(
[
| bn.full_value_func([2, 8, 8, 2], 1.0) | numpy.full |
import beatnum as bn
def normlizattionalize_to_range(numset, R):
"""Returns numset normlizattionalized to range R."""
numset = numset - | bn.get_min(numset) | numpy.min |
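# The dataset row above truncates at its completion point. A plausible full version of
# the helper, written in plain Python with R taken to be a (low, high) pair; this is a
# hedged reconstruction, not the original source.
def normalize_to_range_sketch(values, R):
    lo, hi = R
    shifted = [v - min(values) for v in values]  # shift so the minimum becomes 0
    span = max(shifted)
    if span == 0:
        return [lo for _ in shifted]  # a constant input maps to the lower bound
    return [lo + (hi - lo) * v / span for v in shifted]
assert normalize_to_range_sketch([2.0, 4.0, 6.0], (0.0, 1.0)) == [0.0, 0.5, 1.0]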
from typing import List
import beatnum as bn
import tensorflow as tf
class AutoRegressive:
"""Auto regressive, it is used to generate text"""
def beam_search(self, ibnuts: List[bn.ndnumset], **kwargs) -> bn.ndnumset:
"""
Beam search
        :param ibnuts: a list containing ibnut ids, ibnut masks, or segment ids for one sample
        :return: a 1-axis list; each element is a vocab id
"""
top_k = self.top_k
output_ids = bn.empty((1, 0), dtype=int) if self.start_id is None else bn.numset([[self.start_id]])
output_scores = bn.zeros(1)
for step in range(self.get_max_len):
scores = self.next_token_scores(ibnuts, output_ids).beatnum()
if step == 0:
ibnuts = [bn.duplicate(_ibnut, top_k, axis=0) for _ibnut in ibnuts]
scores = output_scores.change_shape_to((-1, 1)) + scores
indices = scores.perform_partition(-top_k, axis=None)[-top_k:]
indices_1 = indices // scores.shape[1]
indices_2 = (indices % scores.shape[1]).change_shape_to((-1, 1))
output_ids = bn.connect([output_ids[indices_1], indices_2], 1)
output_scores = bn.take_along_axis(scores, indices, axis=None)
end_counts = (output_ids == self.end_id).total_count(1)
if output_ids.shape[1] >= self.get_min_len:
best_one = output_scores.get_argget_max()
                if end_counts[best_one] == self.get_min_ends:  # if this hypothesis has already terminated
                    return output_ids[best_one][:-1]
                else:  # otherwise keep only the unfinished hypotheses (they carry no end token yet)
                    flag = (end_counts < self.get_min_ends)  # mark unfinished sequences
                    if not flag.total():  # if some sequences have already finished
                        ibnuts = [_ibnut[flag] for _ibnut in ibnuts]  # drop finished sequences
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished sequences
                        top_k = flag.total_count()  # shrink top_k accordingly
return output_ids[output_scores.get_argget_max()]
def random_sample(self, ibnuts: List[bn.ndnumset], **kwargs) -> bn.ndnumset:
"""
Random sample
        :param ibnuts: a list containing ibnut ids, ibnut masks, or segment ids for one sample
        :return: a 2-axis list; each inner list contains all token ids of one sentence
"""
output_ids = bn.empty((1, 0), dtype=int) if self.start_id is None else bn.numset([[self.start_id]])
results = []
for step in range(self.get_max_len):
probas = self.next_token_prob(ibnuts, output_ids).beatnum()
p_indices = None
k_indices = None
            probas /= probas.total_count(axis=1, keepdims=True)  # normalization
if step == 0:
probas = | bn.duplicate(probas, self.num_samples, axis=0) | numpy.repeat |
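# A tiny plain-Python illustration of the top-k bookkeeping in beam_search above: the
# (beam x vocab) score matrix is flattened, the k best flat indices are selected, and
# each flat index splits into a beam index (// vocab) and a token index (% vocab).
# The numbers are made up for the example.
_scores = [[0.1, 0.7, 0.2],   # beam 0
           [0.4, 0.3, 0.9]]   # beam 1
_vocab = len(_scores[0])
_flat = [(s, i) for i, s in enumerate(v for row in _scores for v in row)]
_top2 = sorted(_flat, reverse=True)[:2]                  # the two best flat entries
_picks = [(i // _vocab, i % _vocab) for _, i in _top2]
assert _picks == [(1, 2), (0, 1)]                        # (beam, token) pairs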
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import beatnum as bn
import megengine as mge
import megengine.autodifference as ad
import megengine.functional as F
from megengine import Tensor
from megengine.core._imperative_rt.core2 import (
_set_drop_flag,
_set_swap_flag,
get_option,
set_option,
)
from megengine.module import Linear, Module
from megengine.optimizer import SGD
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def get_minibatch_generator():
while True:
ibn_data = bn.zeros((batch_size, 2))
label = bn.zeros(batch_size, dtype=bn.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
ibn_data[i, :] = bn.random.rand(2) * 2 - 1
label[i] = 0 if bn.prod(ibn_data[i]) < 0 else 1
yield ibn_data.convert_type(bn.float32), label.convert_type(bn.int32)
def calculate_precision(data: bn.ndnumset, pred: bn.ndnumset) -> float:
""" Calculate precision for given data and prediction.
:type data: [[x, y], ...]
:param data: Ibnut data
:type pred: [[x_pred, y_pred], ...]
:param pred: Network output data
"""
correct = 0
assert len(data) == len(pred)
for ibn_data, pred_output in zip(data, pred):
label = 0 if bn.prod(ibn_data) < 0 else 1
pred_label = | bn.get_argget_max(pred_output) | numpy.argmax |
import pickle
from pathlib import Path
from typing import Tuple, Union
import click
import beatnum
import rich
from molesp.cli._cli import compute_surface
from molesp.models import ESPMolecule, Surface
from nagl.utilities.toolkits import capture_toolkit_warnings
from openff.recharge.charges.bcc import BCCCollection, BCCGenerator
from openff.recharge.charges.library import (
LibraryChargeCollection,
LibraryChargeGenerator,
LibraryChargeParameter,
)
from openff.recharge.charges.qc import QCChargeGenerator, QCChargeSettings
from openff.recharge.charges.vsite import VirtualSiteCollection, VirtualSiteGenerator
from openff.recharge.conformers import ConformerGenerator, ConformerSettings
from openff.recharge.esp import ESPSettings
from openff.recharge.esp.psi4 import Psi4ESPGenerator
from openff.recharge.grids import MSKGridSettings
from openff.recharge.utilities.geometry import compute_inverseerse_distance_matrix
from openff.recharge.utilities.toolkits import VdWRadiiType, compute_vdw_radii
from openff.toolkit.topology import Molecule
from openff.units import unit
from openff.utilities import temporary_cd
from openmm import unit as openmm_unit
from pydantic import parse_file_as
_CACHED_CHARGES = {}
def compute_base_charge(
molecule: Molecule,
conformer_settings: ConformerSettings,
charge_settings: QCChargeSettings,
):
tagged_smiles = molecule.to_smiles(mapped=True)
if tagged_smiles in _CACHED_CHARGES:
return _CACHED_CHARGES[tagged_smiles]
conformers = ConformerGenerator.generate(molecule, conformer_settings)
charges = QCChargeGenerator.generate(molecule, conformers, charge_settings)
charge_collection = LibraryChargeCollection(
parameters=[
LibraryChargeParameter(
smiles=tagged_smiles,
value=[float(v) for v in charges.convert_into_one_dim().tolist()],
)
]
)
_CACHED_CHARGES[tagged_smiles] = charge_collection
return charge_collection
def compute_mm_esp(
molecule: Molecule,
conformer: unit.Quantity,
charge_collection: Union[
Tuple[ConformerSettings, QCChargeSettings], LibraryChargeCollection
],
bcc_collection: BCCCollection,
vsite_collection: VirtualSiteCollection,
grid: unit.Quantity,
):
console = rich.get_console()
console.print("applying MM charges")
if not isinstance(charge_collection, LibraryChargeCollection):
conformer_settings, charge_settings = charge_collection
charge_collection = compute_base_charge(
molecule, conformer_settings, charge_settings
)
atom_charges = LibraryChargeGenerator.generate(molecule, charge_collection)
if len(bcc_collection.parameters) > 0:
atom_charges += BCCGenerator.generate(molecule, bcc_collection)
if len(vsite_collection.parameters) > 0:
vsite_charges = VirtualSiteGenerator.generate_charge_increments(
molecule, vsite_collection
)
n_vsites = len(vsite_charges) - molecule.n_atoms
full_value_func_charges = (
beatnum.vpile_operation([atom_charges, beatnum.zeros((n_vsites, 1))]) + vsite_charges
)
else:
full_value_func_charges = atom_charges
if len(vsite_collection.parameters) > 0:
vsite_coordinates = VirtualSiteGenerator.generate_positions(
molecule, vsite_collection, conformer
)
full_value_func_coordinates = | beatnum.vpile_operation([conformer, vsite_coordinates]) | numpy.vstack |
# ***************************************************************
# Copyright (c) 2020 Jittor. All Rights Reserved.
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
import beatnum as bn
import unittest
try:
import autograd.beatnum as abn
from autograd import jacobian
has_autograd = True
except:
has_autograd = False
@unittest.skipIf(not has_autograd, "No autograd found.")
class TestCodeOp(unittest.TestCase):
def test_svd(self):
def check_svd(a):
u,s,v = abn.linalg.svd(a, full_value_func_matrices=0)
return u,s,v
def check_u(a):
u,s,v = abn.linalg.svd(a, full_value_func_matrices=0)
return u
def check_s(a):
u,s,v = abn.linalg.svd(a, full_value_func_matrices=0)
return s
def check_v(a):
u,s,v = abn.linalg.svd(a, full_value_func_matrices=0)
return v
for i in range(50):
#not for full_value_func-matrices!
a = jt.random((2,2,5,4))
c_a = abn.numset(a.data)
u,s,v = jt.linalg.svd(a)
tu,ts,tv = check_svd(c_a)
assert bn.totalclose(tu,u.data)
assert bn.totalclose(ts,s.data)
assert bn.totalclose(tv,v.data)
ju = jt.grad(u,a)
js = jt.grad(s,a)
jv = jt.grad(v,a)
grad_u = jacobian(check_u)
gu = grad_u(c_a)
gu = bn.total_count(gu, 4)
gu = bn.total_count(gu, 4)
gu = bn.total_count(gu, 2)
gu = bn.total_count(gu, 2)
grad_s = jacobian(check_s)
gs = grad_s(c_a)
gs = bn.total_count(gs, 4)
gs = bn.total_count(gs, 2)
gs = bn.total_count(gs, 2)
grad_v = jacobian(check_v)
gv = grad_v(c_a)
gv = bn.total_count(gv, 4)
gv = bn.total_count(gv, 4)
gv = bn.total_count(gv, 2)
gv = bn.total_count(gv, 2)
try:
assert bn.totalclose(ju.data,gu,atol=1e-5)
except AssertionError:
print(ju.data)
print(gu)
try:
assert bn.totalclose(js.data,gs,atol=1e-5)
except AssertionError:
print(js.data)
print(gs)
try:
assert bn.totalclose(jv.data,gv,atol=1e-5)
except AssertionError:
print(jv.data)
print(gv)
def test_eigh(self):
def check_eigh(a,UPLO='L'):
w, v = abn.linalg.eigh(a,UPLO)
return w, v
def check_w(a,UPLO='L'):
w, v = abn.linalg.eigh(a,UPLO)
return w
def check_v(a,UPLO='L'):
w, v = abn.linalg.eigh(a,UPLO)
return v
for i in range(50):
a = jt.random((2,2,3,3))
c_a = a.data
w, v = jt.linalg.eigh(a)
tw, tv = check_eigh(c_a)
assert bn.totalclose(w.data,tw)
assert bn.totalclose(v.data,tv)
jw = jt.grad(w, a)
jv = jt.grad(v, a)
check_gw = jacobian(check_w)
check_gv = jacobian(check_v)
gw = check_gw(c_a)
gw = bn.total_count(gw,4)
gw = bn.total_count(gw,2)
gw = bn.total_count(gw,2)
assert bn.totalclose(gw,jw.data,rtol = 1,atol = 5e-8)
gv = check_gv(c_a)
gv = bn.total_count(gv,4)
gv = bn.total_count(gv,4)
gv = bn.total_count(gv,2)
gv = bn.total_count(gv,2)
assert bn.totalclose(gv,jv.data,rtol = 1,atol = 5e-8)
def test_pinverse(self):
def check_pinverse(a):
w = abn.linalg.pinverse(a)
return w
for i in range(50):
x = jt.random((2,2,4,4))
c_a = x.data
mx = jt.linalg.pinverse(x)
tx = check_pinverse(c_a)
bn.totalclose(mx.data,tx)
jx = jt.grad(mx,x)
check_grad = jacobian(check_pinverse)
gx = check_grad(c_a)
bn.totalclose(gx,jx.data)
def test_inverse(self):
def check_inverse(a):
w = abn.linalg.inverse(a)
return w
for i in range(50):
tn = bn.random.randn(4,4).convert_type('float32')*5
while bn.totalclose(bn.linalg.det(tn),0):
                tn = bn.random.randn(4, 4).convert_type('float32')*5
x = jt.numset(tn)
x = x.reindex([2,2,x.shape[0],x.shape[1]],["i2","i3"])
c_a = x.data
mx = jt.linalg.inverse(x)
tx = check_inverse(c_a)
bn.totalclose(mx.data,tx)
jx = jt.grad(mx,x)
check_grad = jacobian(check_inverse)
gx = check_grad(c_a)
bn.totalclose(gx,jx.data)
def test_slogdet(self):
def check_ans(a):
s, w = abn.linalg.slogdet(a)
return s, w
def check_slogdet(a):
s, w = abn.linalg.slogdet(a)
return w
for i in range(50):
tn = bn.random.randn(4,4).convert_type('float32')*10
while bn.totalclose(bn.linalg.det(tn),0):
tn = bn.random.randn((4,4)).convert_type('float32')*10
x = jt.numset(tn)
x = x.reindex([2,2,x.shape[0],x.shape[1]],["i2","i3"])
s = list(x.shape)
det_s = s[:-2]
if len(det_s) == 0:
det_s.apd(1)
sign, mx = jt.linalg.slogdet(x)
ts, ta = check_ans(x.data)
assert bn.totalclose(sign.data, ts)
assert bn.totalclose(mx.data, ta)
jx = jt.grad(mx,x)
check_sgrad = jacobian(check_slogdet)
gx = check_sgrad(x.data)
gx = bn.total_count(gx,2)
gx = bn.total_count(gx,2)
assert | bn.totalclose(gx,jx.data) | numpy.allclose |
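# A note on the pattern used in the tests above: jt.grad here behaves like backpropagating
# an all-ones upstream gradient, i.e. it returns the gradient of sum(outputs), so the
# autograd Jacobian is summed over its output axes before the comparison. Plain-Python
# illustration for f(x0, x1) = (x0*x1, x0 + x1), whose Jacobian is [[x1, x0], [1, 1]]:
def _jacobian_f(x0, x1):
    return [[x1, x0], [1.0, 1.0]]
def _grad_of_sum_f(x0, x1):
    # d/dx of (x0*x1 + x0 + x1) is (x1 + 1, x0 + 1)
    return [x1 + 1.0, x0 + 1.0]
_J = _jacobian_f(2.0, 3.0)
_column_sums = [_J[0][0] + _J[1][0], _J[0][1] + _J[1][1]]
assert _column_sums == _grad_of_sum_f(2.0, 3.0)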
import beatnum as bn
import datetime
from time import time
from .binning import Binning
from .binning import Binning_Types
from .helper import objectview
from .preprocessors import DataSource
class BundleGenerator():
def __init__(self, model, binning):
self.__model = model
self.__binning = binning.copy()
self.__last_seed = None
self.__compute_probability_matrix()
pass
def recommended_amount(self, reality_hist_operation):
"""reality_hist_operation: the hist_operation of the model's source data
binned with the new binning."""
get_min_prob = self.probabilities[
(self.probabilities[:, -1] > 0) *
(reality_hist_operation.values.convert_into_one_dim() > 0),
-1].get_min()
if get_min_prob > 0:
return int(bn.ceil(1/get_min_prob))
else:
return 0
def expected_best_quality(self, amount, reality_hist_operation):
"""reality_hist_operation: the hist_operation of the model's source data
binned with the new binning."""
index = bn.numset([p * amount >= 1 or
reality_hist_operation.values.convert_into_one_dim()[i] == 0
for i, p in enumerate(self.probabilities[:, -1])])
return self.binning.volumes.convert_into_one_dim()[index].total_count() / \
self.binning.total_volume
def __compute_probability_matrix(self):
total_edges = [
bn.uniq(bn.connect((gbe, mbe)))
for gbe, mbe in zip(self.binning.edges,
self.model.binning.edges)
]
# Create sub_edges for each bin and along each dimension.
sub_edges = []
for edges, coarse in zip(total_edges, self.binning.edges):
sub_edges.apd(
[edges[(edges >= left) * (edges <= right)]
for left, right in zip(coarse[:-1], coarse[1:])]
)
# sub_edges is an n-dim list of lists of numsets:
# sub_edges[dim][bin_index] contains the sub edges along dimension
# dim for bin with index bin_index along that dimension.
# Create a 1d list of total bin indices (along total dimensions)
bin_indices = [range(0, i) for i in self.binning.counts]
bin_indices = bn.meshgrid(*bin_indices, indexing='ij')
bin_indices = list(zip(*[mg.convert_into_one_dim() for mg in bin_indices]))
# here bin_indices equals [ (x0, y0, z0), (x0, y0, z1), (x0, y1, z0) ... ]
# Probabilities is a matrix with one row per bin with the format:
# c_x, c_y, c_z, ... , probability
        # for each bin/row, where c_i indicates the center of the bin.
probabilities = bn.zeros((len(bin_indices),
self.binning.dimensions + 1))
for i, bin_index in enumerate(bin_indices):
# Find center of current bin
probabilities[i, :-1] = [cpd[idx] for idx, cpd in
zip(bin_index, self.binning.centers)]
# Create sub-binning to calculate probability of bin
sub_binning = Binning(Binning_Types.SUBBINNING,
[sub_edges[dim][idx] for dim, idx in enumerate(bin_index)])
factors = sub_binning.volumes / sub_binning.total_volume
totaltF = self.model.F(*sub_binning.meshgrids).convert_into_one_dim()
totaltF = [x if x > 0.0 else 0.0 for x in totaltF]
totaltF = totaltF * factors.convert_into_one_dim()
probabilities[i, -1] = total_count(totaltF)
probabilities[:, -1] /= | bn.linalg.normlizattion(probabilities[:, -1], ord=1) | numpy.linalg.norm |
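# The meshgrid/zip construction in __compute_probability_matrix above enumerates the
# Cartesian product of per-dimension bin indices in row-major order; the same list can
# be produced with itertools.product, shown here as a plain-Python cross-check.
import itertools as _itertools
_counts = (2, 3)
_bin_indices = list(_itertools.product(*(range(c) for c in _counts)))
assert _bin_indices == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]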
import warnings
import ctypes as _ctypes
# Load mkl_spblas through the libmkl_rt common interface
# Check each of these library types
_MKL_SO_LINUX = "libmkl_rt.so"
_MKL_SO_OSX = "libmkl_rt.dylib"
_MKL_SO_WINDOWS = "mkl_rt.dll"
# There's probably a better way to do this
_libmkl, _libmkl_loading_errors = None, []
for so_file in [_MKL_SO_LINUX, _MKL_SO_OSX, _MKL_SO_WINDOWS]:
try:
_libmkl = _ctypes.cdll.LoadLibrary(so_file)
break
except (OSError, ImportError) as err:
_libmkl_loading_errors.apd(err)
if _libmkl is None:
ierr_msg = "Unable to load the MKL libraries through libmkl_rt. Try setting $LD_LIBRARY_PATH."
ierr_msg += "\n\t" + "\n\t".join(map(lambda x: str(x), _libmkl_loading_errors))
raise ImportError(ierr_msg)
# Use mkl-service to check version if it's insttotaled
# Since it's not on PyPi I don't want to make this an actual package dependency
# So without it just create mock functions and don't do version checking
try:
from mkl import get_version, get_version_string
except ImportError:
def get_version():
return None
def get_version_string():
return None
if get_version() is not None and get_version()["MajorVersion"] < 2020:
msg = "Loaded version of MKL is out of date: {v}".format(v=get_version_string())
warnings.warn(msg)
import beatnum as bn
import scipy.sparse as _spsparse
from beatnum.ctypeslib import ndpointer, as_numset
NUMPY_FLOAT_DTYPES = [bn.float32, bn.float64]
class MKL:
""" This class holds shared object references to C functions with arg and returntypes that can be adjusted"""
MKL_INT = None
MKL_INT_NUMPY = None
# Import function for creating a MKL CSR object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-create-csr
_mkl_sparse_d_create_csr = _libmkl.mkl_sparse_d_create_csr
# Import function for creating a MKL CSR object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-create-csr
_mkl_sparse_s_create_csr = _libmkl.mkl_sparse_s_create_csr
# Import function for creating a MKL CSC object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-create-csc
_mkl_sparse_d_create_csc = _libmkl.mkl_sparse_d_create_csc
# Import function for creating a MKL CSC object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-create-csc
_mkl_sparse_s_create_csc = _libmkl.mkl_sparse_s_create_csc
# Export function for exporting a MKL CSR object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-export-csr
_mkl_sparse_d_export_csr = _libmkl.mkl_sparse_d_export_csr
# Export function for exporting a MKL CSR object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-export-csr
_mkl_sparse_s_export_csr = _libmkl.mkl_sparse_s_export_csr
# Export function for exporting a MKL CSC object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-export-csc
_mkl_sparse_d_export_csc = _libmkl.mkl_sparse_d_export_csc
# Export function for exporting a MKL CSC object
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-export-csc
_mkl_sparse_s_export_csc = _libmkl.mkl_sparse_s_export_csc
# Import function for matmul
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-spmm
_mkl_sparse_spmm = _libmkl.mkl_sparse_spmm
# Import function for product of sparse matrix with its switching_places
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-syrk
_mkl_sparse_syrk = _libmkl.mkl_sparse_syrk
# Import function for cleaning up MKL objects
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-destroy
_mkl_sparse_destroy = _libmkl.mkl_sparse_destroy
# Import function for ordering MKL objects
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-order
_mkl_sparse_order = _libmkl.mkl_sparse_order
    # Import function for converting to CSR
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-convert-csr
_mkl_sparse_convert_csr = _libmkl.mkl_sparse_convert_csr
# Import function for matmul single dense
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-spmm
_mkl_sparse_s_spmmd = _libmkl.mkl_sparse_s_spmmd
# Import function for matmul double dense
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-spmm
_mkl_sparse_d_spmmd = _libmkl.mkl_sparse_d_spmmd
# Import function for matmul single sparse*dense
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-mm
_mkl_sparse_s_mm = _libmkl.mkl_sparse_s_mm
# Import function for matmul double sparse*dense
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-mm
_mkl_sparse_d_mm = _libmkl.mkl_sparse_d_mm
# Import function for matmul single dense*dense
# https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm
_cblas_sgemm = _libmkl.cblas_sgemm
# Import function for matmul double dense*dense
# https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm
_cblas_dgemm = _libmkl.cblas_dgemm
# Import function for matrix * vector
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-mv
_mkl_sparse_s_mv = _libmkl.mkl_sparse_s_mv
# Import function for matrix * vector
# https://software.intel.com/en-us/mkl-developer-reference-c-mkl-sparse-mv
_mkl_sparse_d_mv = _libmkl.mkl_sparse_d_mv
@classmethod
def _set_int_type(cls, c_type, bn_type):
cls.MKL_INT = c_type
cls.MKL_INT_NUMPY = bn_type
cls._mkl_sparse_d_create_csr.argtypes = cls._mkl_sparse_create_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_create_csr.restypes = _ctypes.c_int
cls._mkl_sparse_s_create_csr.argtypes = cls._mkl_sparse_create_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_create_csr.restypes = _ctypes.c_int
cls._mkl_sparse_d_create_csc.argtypes = cls._mkl_sparse_create_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_create_csc.restypes = _ctypes.c_int
cls._mkl_sparse_s_create_csc.argtypes = cls._mkl_sparse_create_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_create_csc.restypes = _ctypes.c_int
cls._mkl_sparse_d_export_csr.argtypes = cls._mkl_export_create_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_export_csr.restypes = _ctypes.c_int
cls._mkl_sparse_s_export_csr.argtypes = cls._mkl_export_create_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_export_csr.restypes = _ctypes.c_int
cls._mkl_sparse_d_export_csc.argtypes = cls._mkl_export_create_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_export_csc.restypes = _ctypes.c_int
cls._mkl_sparse_s_export_csr.argtypes = cls._mkl_export_create_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_export_csr.restypes = _ctypes.c_int
cls._mkl_sparse_spmm.argtypes = [_ctypes.c_int,
sparse_matrix_t,
sparse_matrix_t,
_ctypes.POINTER(sparse_matrix_t)]
cls._mkl_sparse_spmm.restypes = _ctypes.c_int
cls._mkl_sparse_s_spmmd.argtypes = cls._mkl_sparse_spmmd_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_spmmd.restypes = _ctypes.c_int
cls._mkl_sparse_d_spmmd.argtypes = cls._mkl_sparse_spmmd_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_spmmd.restypes = _ctypes.c_int
cls._mkl_sparse_s_mm.argtypes = cls._mkl_sparse_mm_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_mm.restypes = _ctypes.c_int
cls._mkl_sparse_d_mm.argtypes = cls._mkl_sparse_mm_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_mm.restypes = _ctypes.c_int
cls._cblas_sgemm.argtypes = cls._cblas_gemm_argtypes(_ctypes.c_float)
cls._cblas_sgemm.restypes = None
cls._cblas_dgemm.argtypes = cls._cblas_gemm_argtypes(_ctypes.c_double)
cls._cblas_dgemm.restypes = None
cls._mkl_sparse_destroy.argtypes = [sparse_matrix_t]
cls._mkl_sparse_destroy.restypes = _ctypes.c_int
cls._mkl_sparse_order.argtypes = [sparse_matrix_t]
cls._mkl_sparse_order.restypes = _ctypes.c_int
cls._mkl_sparse_s_mv.argtypes = cls._mkl_sparse_mv_argtypes(_ctypes.c_float)
cls._mkl_sparse_s_mv.restypes = _ctypes.c_int
cls._mkl_sparse_d_mv.argtypes = cls._mkl_sparse_mv_argtypes(_ctypes.c_double)
cls._mkl_sparse_d_mv.restypes = _ctypes.c_int
def __init__(self):
raise NotImplementedError("This class is not intended to be instanced")
""" The following methods return the argtype lists for each MKL function that has s and d variants"""
@staticmethod
def _mkl_sparse_create_argtypes(prec_type):
return [_ctypes.POINTER(sparse_matrix_t),
_ctypes.c_int,
MKL.MKL_INT,
MKL.MKL_INT,
ndpointer(dtype=MKL.MKL_INT, ndim=1, flags='C_CONTIGUOUS'),
ndpointer(dtype=MKL.MKL_INT, ndim=1, flags='C_CONTIGUOUS'),
ndpointer(dtype=MKL.MKL_INT, ndim=1, flags='C_CONTIGUOUS'),
ndpointer(dtype=prec_type, ndim=1, flags='C_CONTIGUOUS')]
@staticmethod
def _mkl_export_create_argtypes(prec_type):
return [sparse_matrix_t,
_ctypes.POINTER(_ctypes.c_int),
_ctypes.POINTER(MKL.MKL_INT),
_ctypes.POINTER(MKL.MKL_INT),
_ctypes.POINTER(_ctypes.POINTER(MKL.MKL_INT)),
_ctypes.POINTER(_ctypes.POINTER(MKL.MKL_INT)),
_ctypes.POINTER(_ctypes.POINTER(MKL.MKL_INT)),
_ctypes.POINTER(_ctypes.POINTER(prec_type))]
@staticmethod
def _cblas_gemm_argtypes(prec_type):
return [_ctypes.c_int,
_ctypes.c_int,
_ctypes.c_int,
MKL.MKL_INT,
MKL.MKL_INT,
MKL.MKL_INT,
prec_type,
ndpointer(dtype=prec_type, ndim=2),
MKL.MKL_INT,
ndpointer(dtype=prec_type, ndim=2),
MKL.MKL_INT,
prec_type,
_ctypes.POINTER(prec_type),
MKL.MKL_INT]
@staticmethod
def _mkl_sparse_spmmd_argtypes(prec_type):
return [_ctypes.c_int,
sparse_matrix_t,
sparse_matrix_t,
_ctypes.c_int,
_ctypes.POINTER(prec_type), MKL.MKL_INT]
@staticmethod
def _mkl_sparse_mm_argtypes(prec_type):
return [_ctypes.c_int,
prec_type,
sparse_matrix_t,
matrix_descr,
_ctypes.c_int,
ndpointer(dtype=prec_type, ndim=2),
MKL.MKL_INT,
MKL.MKL_INT,
prec_type,
_ctypes.POINTER(prec_type),
MKL.MKL_INT]
@staticmethod
def _mkl_sparse_mv_argtypes(prec_type):
return [_ctypes.c_int,
prec_type,
sparse_matrix_t,
matrix_descr,
ndpointer(dtype=prec_type, ndim=1),
prec_type,
_ctypes.POINTER(prec_type)]
# Construct opaque struct & type
class _sparse_matrix(_ctypes.Structure):
pass
sparse_matrix_t = _ctypes.POINTER(_sparse_matrix)
# Matrix description struct
class matrix_descr(_ctypes.Structure):
_fields_ = [("sparse_matrix_type_t", _ctypes.c_int),
("sparse_fill_mode_t", _ctypes.c_int),
("sparse_diag_type_t", _ctypes.c_int)]
def __init__(self, sparse_matrix_type_t=20, sparse_fill_mode_t=0, sparse_diag_type_t=0):
super(matrix_descr, self).__init__(sparse_matrix_type_t, sparse_fill_mode_t, sparse_diag_type_t)
# Define standard return codes
RETURN_CODES = {0: "SPARSE_STATUS_SUCCESS",
1: "SPARSE_STATUS_NOT_INITIALIZED",
2: "SPARSE_STATUS_ALLOC_FAILED",
3: "SPARSE_STATUS_INVALID_VALUE",
4: "SPARSE_STATUS_EXECUTION_FAILED",
5: "SPARSE_STATUS_INTERNAL_ERROR",
6: "SPARSE_STATUS_NOT_SUPPORTED"}
# Define order codes
LAYOUT_CODE_C = 101
LAYOUT_CODE_F = 102
def _check_scipy_index_typing(sparse_matrix):
"""
    Ensure that the sparse matrix indices are in the correct integer type
:param sparse_matrix: Scipy matrix in CSC or CSR format
:type sparse_matrix: scipy.sparse.spmatrix
"""
int_get_max = bn.iinfo(MKL.MKL_INT_NUMPY).get_max
if (sparse_matrix.nnz > int_get_max) or (get_max(sparse_matrix.shape) > int_get_max):
msg = "MKL interface is {t} and cannot hold matrix {m}".format(m=repr(sparse_matrix), t=MKL.MKL_INT_NUMPY)
raise ValueError(msg)
# Cast indexes to MKL_INT type
if sparse_matrix.indptr.dtype != MKL.MKL_INT_NUMPY:
sparse_matrix.indptr = sparse_matrix.indptr.convert_type(MKL.MKL_INT_NUMPY)
if sparse_matrix.indices.dtype != MKL.MKL_INT_NUMPY:
sparse_matrix.indices = sparse_matrix.indices.convert_type(MKL.MKL_INT_NUMPY)
def _get_beatnum_layout(beatnum_arr):
"""
Get the numset layout code for a dense numset in C or F order.
Raises a ValueError if the numset is not contiguous.
:param beatnum_arr: Beatnum dense numset
:type beatnum_arr: bn.ndnumset
:return: The layout code for MKL and the leading dimension
:rtype: int, int
"""
if beatnum_arr.flags.c_contiguous:
return LAYOUT_CODE_C, beatnum_arr.shape[1]
elif beatnum_arr.flags.f_contiguous:
return LAYOUT_CODE_F, beatnum_arr.shape[0]
elif not beatnum_arr.flags.contiguous:
raise ValueError("Array is not contiguous")
else:
raise ValueError("Array layout check has failed for unknown reason")
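# Usage sketch (added for illustration, not part of the original module): how
# _get_beatnum_layout distinguishes C- from F-ordered dense numsets and which
# leading dimension it reports. The demo function name is hypothetical.
def _demo_beatnum_layout():
    a = bn.create_ones((3, 4))                 # C-contiguous by default
    b = bn.create_ones((3, 4), order="F")      # F-contiguous
    assert _get_beatnum_layout(a) == (LAYOUT_CODE_C, 4)  # leading dim = n columns
    assert _get_beatnum_layout(b) == (LAYOUT_CODE_F, 3)  # leading dim = n rows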
def _create_mkl_sparse(matrix):
"""
Create MKL internal representation
:param matrix: Sparse data in CSR or CSC format
:type matrix: scipy.sparse.spmatrix
:return ref, double_precision: Handle for the MKL internal representation and boolean for double precision
    :rtype: sparse_matrix_t, bool
"""
# Figure out which dtype for data
if matrix.dtype == bn.float32:
double_precision = False
elif matrix.dtype == bn.float64:
double_precision = True
else:
raise ValueError("Only float32 or float64 dtypes are supported")
# Figure out which matrix creation function to use
if _spsparse.isspmatrix_csr(matrix):
assert matrix.indptr.shape[0] == matrix.shape[0] + 1
handle_func = MKL._mkl_sparse_d_create_csr if double_precision else MKL._mkl_sparse_s_create_csr
elif _spsparse.isspmatrix_csc(matrix):
assert matrix.indptr.shape[0] == matrix.shape[1] + 1
handle_func = MKL._mkl_sparse_d_create_csc if double_precision else MKL._mkl_sparse_s_create_csc
else:
raise ValueError("Matrix is not CSC or CSR")
# Make sure indices are of the correct integer type
_check_scipy_index_typing(matrix)
assert matrix.data.shape[0] == matrix.indices.shape[0]
return _pass_mkl_handle(matrix, handle_func), double_precision
def _pass_mkl_handle(data, handle_func):
"""
Create MKL internal representation
:param data: Sparse data
:type data: scipy.sparse.spmatrix
:return ref: Handle for the MKL internal representation
:rtype: sparse_matrix_t
"""
# Create a pointer for the output matrix
ref = sparse_matrix_t()
# Load into a MKL data structure and check return
ret_val = handle_func(_ctypes.byref(ref),
_ctypes.c_int(0),
MKL.MKL_INT(data.shape[0]),
MKL.MKL_INT(data.shape[1]),
data.indptr[0:-1],
data.indptr[1:],
data.indices,
data.data)
# Check return
if ret_val != 0:
err_msg = "{fn} returned {v} ({e})".format(fn=handle_func.__name__, v=ret_val, e=RETURN_CODES[ret_val])
raise ValueError(err_msg)
return ref
def _export_mkl(csr_mkl_handle, double_precision, output_type="csr"):
"""
Export a MKL sparse handle
:param csr_mkl_handle: Handle for the MKL internal representation
:type csr_mkl_handle: sparse_matrix_t
:param double_precision: Use float64 if True, float32 if False. This MUST match the underlying float type - this
defines a memory view, it does not cast.
:type double_precision: bool
:param output_type: The structure of the MKL handle (and therefore the type of scipy sparse to create)
:type output_type: str
:return: Sparse matrix in scipy format
:rtype: scipy.spmatrix
"""
# Create the pointers for the output data
indptrb = _ctypes.POINTER(MKL.MKL_INT)()
indptren = _ctypes.POINTER(MKL.MKL_INT)()
indices = _ctypes.POINTER(MKL.MKL_INT)()
ordering = _ctypes.c_int()
nrows = MKL.MKL_INT()
ncols = MKL.MKL_INT()
output_type = output_type.lower()
if output_type == "csr":
out_func = MKL._mkl_sparse_d_export_csr if double_precision else MKL._mkl_sparse_s_export_csr
sp_matrix_constructor = _spsparse.csr_matrix
elif output_type == "csc":
out_func = MKL._mkl_sparse_d_export_csc if double_precision else MKL._mkl_sparse_s_export_csc
sp_matrix_constructor = _spsparse.csc_matrix
else:
raise ValueError("Only CSR and CSC output types are supported")
if double_precision:
data = _ctypes.POINTER(_ctypes.c_double)()
final_dtype = bn.float64
else:
data = _ctypes.POINTER(_ctypes.c_float)()
final_dtype = bn.float32
ret_val = out_func(csr_mkl_handle,
_ctypes.byref(ordering),
_ctypes.byref(nrows),
_ctypes.byref(ncols),
_ctypes.byref(indptrb),
_ctypes.byref(indptren),
_ctypes.byref(indices),
_ctypes.byref(data))
# Check return
if ret_val != 0:
err_msg = "{fn} returned {v} ({e})".format(fn=out_func.__name__, v=ret_val, e=RETURN_CODES[ret_val])
raise ValueError(err_msg)
# Check ordering
if ordering.value != 0:
raise ValueError("1-indexing (F-style) is not supported")
# Get matrix dims
ncols = ncols.value
nrows = nrows.value
# If any_condition axis is 0 return an empty matrix
if nrows == 0 or ncols == 0:
return sp_matrix_constructor((nrows, ncols), dtype=final_dtype)
# Get the index dimension
index_dim = nrows if output_type == "csr" else ncols
# Construct a beatnum numset and add_concat 0 to first position for scipy.sparse's 3-numset indexing
indptrb = as_numset(indptrb, shape=(index_dim,))
indptren = as_numset(indptren, shape=(index_dim,))
indptren = bn.stick(indptren, 0, indptrb[0])
nnz = indptren[-1] - indptrb[0]
# If there are no non-zeros, return an empty matrix
# If the number of non-zeros is insane, raise a ValueError
if nnz == 0:
return sp_matrix_constructor((nrows, ncols), dtype=final_dtype)
elif nnz < 0 or nnz > ncols * nrows:
raise ValueError("Matrix ({m} x {n}) is attempting to index {z} elements".format(m=nrows, n=ncols, z=nnz))
    # Construct beatnum numsets from data pointer and from indices pointer
data = bn.numset(as_numset(data, shape=(nnz,)), copy=True)
indices = bn.numset(as_numset(indices, shape=(nnz,)), copy=True)
# Pack and return the matrix
return sp_matrix_constructor((data, indices, indptren), shape=(nrows, ncols))
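# Round-trip sketch (added for illustration, not part of the original module):
# build an MKL handle from a scipy CSR matrix, export it back, and release the
# handle. The demo function name is hypothetical and it assumes the MKL library
# loaded correctly.
def _demo_csr_round_trip():
    mat = _spsparse.random(4, 4, density=0.5, format="csr", dtype=bn.float64)
    handle, double_precision = _create_mkl_sparse(mat)
    try:
        copy = _export_mkl(handle, double_precision, output_type="csr")
        assert bn.totalclose(mat.A, copy.A)
    finally:
        _destroy_mkl_handle(handle)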
def _destroy_mkl_handle(ref_handle):
"""
Detotalocate a MKL sparse handle
:param ref_handle:
:type ref_handle: sparse_matrix_t
"""
ret_val = MKL._mkl_sparse_destroy(ref_handle)
if ret_val != 0:
raise ValueError("mkl_sparse_destroy returned {v} ({e})".format(v=ret_val, e=RETURN_CODES[ret_val]))
def _order_mkl_handle(ref_handle):
"""
Reorder indexes in a MKL sparse handle
:param ref_handle:
:type ref_handle: sparse_matrix_t
"""
ret_val = MKL._mkl_sparse_order(ref_handle)
if ret_val != 0:
raise ValueError("mkl_sparse_order returned {v} ({e})".format(v=ret_val, e=RETURN_CODES[ret_val]))
def _convert_to_csr(ref_handle, destroy_original=False):
"""
Convert a MKL sparse handle to CSR format
:param ref_handle:
:type ref_handle: sparse_matrix_t
:return:
"""
csr_ref = sparse_matrix_t()
ret_val = MKL._mkl_sparse_convert_csr(ref_handle, _ctypes.c_int(10), _ctypes.byref(csr_ref))
if ret_val != 0:
try:
_destroy_mkl_handle(csr_ref)
except ValueError:
pass
raise ValueError("mkl_sparse_convert_csr returned {v} ({e})".format(v=ret_val, e=RETURN_CODES[ret_val]))
if destroy_original:
_destroy_mkl_handle(ref_handle)
return csr_ref
def _sanity_check(matrix_a, matrix_b, totalow_vector=False):
"""
Check matrix dimensions
:param matrix_a: sp.sparse or beatnum numset
:param matrix_b: sp.sparse or beatnum numset
"""
a_2d, b_2d = matrix_a.ndim == 2, matrix_b.ndim == 2
a_vec, b_vec = _is_dense_vector(matrix_a), _is_dense_vector(matrix_b)
# Check to make sure that both matrices are 2-d
if not totalow_vector and (not a_2d or not b_2d):
err_msg = "Matrices must be 2d: {m1} * {m2} is not valid".format(m1=matrix_a.shape, m2=matrix_b.shape)
raise ValueError(err_msg)
    inversealid_ndims = not (a_2d or a_vec) or not (b_2d or b_vec)
inversealid_align = (matrix_a.shape[1] if not matrix_a.ndim == 1 else matrix_a.shape[0]) != matrix_b.shape[0]
# Check to make sure that this multiplication can work
if inversealid_align or inversealid_ndims:
err_msg = "Matrix alignment error: {m1} * {m2} is not valid".format(m1=matrix_a.shape, m2=matrix_b.shape)
raise ValueError(err_msg)
def _cast_to_float64(matrix):
""" Make a copy of the numset as double precision floats or return the reference if it already is"""
return matrix.convert_type(bn.float64) if matrix.dtype != bn.float64 else matrix
def _type_check(matrix_a, matrix_b, cast=False, dprint=print):
"""
Make sure that both matrices are single precision floats or both are double precision floats
If not, convert to double precision floats if cast is True, or raise an error if cast is False
"""
# Check dtypes
if matrix_a.dtype == bn.float32 and matrix_b.dtype == bn.float32:
return matrix_a, matrix_b
elif matrix_a.dtype == bn.float64 and matrix_b.dtype == bn.float64:
return matrix_a, matrix_b
elif (matrix_a.dtype != bn.float64 or matrix_b.dtype != bn.float64) and cast:
dprint("Recasting matrix data types {a} and {b} to bn.float64".format(a=matrix_a.dtype,
b=matrix_b.dtype))
return _cast_to_float64(matrix_a), _cast_to_float64(matrix_b)
elif matrix_a.dtype != bn.float64 or matrix_b.dtype != bn.float64:
err_msg = "Matrix data types must be in concordance; {a} and {b} provided".format(a=matrix_a.dtype,
b=matrix_b.dtype)
raise ValueError(err_msg)
def _is_dense_vector(m_or_v):
return not _spsparse.issparse(m_or_v) and ((m_or_v.ndim == 1) or ((m_or_v.ndim == 2) and get_min(m_or_v.shape) == 1))
def _empty_output_check(matrix_a, matrix_b):
"""Check for trivial cases filter_condition an empty numset should be produced"""
# One dimension is zero
if get_min([*matrix_a.shape, *matrix_b.shape]) == 0:
return True
# The sparse numset is empty
elif _spsparse.issparse(matrix_a) and get_min(matrix_a.data.shape[0], matrix_a.indices.shape[0]) == 0:
return True
elif _spsparse.issparse(matrix_b) and get_min(matrix_b.data.shape[0], matrix_b.indices.shape[0]) == 0:
return True
# Neither trivial condition
else:
return False
def _validate_dtype():
"""
Test to make sure that this library works by creating a random sparse numset in CSC format,
    then converting it to CSR format and making sure it has not raised an exception.
"""
test_numset = _spsparse.random(5, 5, density=0.5, format="csc", dtype=bn.float32, random_state=50)
test_comparison = test_numset.A
csc_ref, precision_flag = _create_mkl_sparse(test_numset)
try:
csr_ref = _convert_to_csr(csc_ref)
final_numset = _export_mkl(csr_ref, precision_flag)
        if not bn.totalclose(test_comparison, final_numset.A):
"""
"""
import csv
import datetime
import gzip
import os
import time
import beatnum as bn
import tensorflow as tf
import qdraw.dataset as dataset
import qdraw.dataset_iterator as dataset_iterator
def build_model(data):
"""
"""
FLAGS = tf.app.flags.FLAGS
# NOTE: choose model
if FLAGS.model == 'mobilenets':
import qdraw.model_mobilenets as chosen_model
elif FLAGS.model == 'mobilenets_v2':
import qdraw.model_mobilenets_v2 as chosen_model
elif FLAGS.model == 'resnet':
import qdraw.model_resnet as chosen_model
elif FLAGS.model == 'blind':
import qdraw.model_blind as chosen_model
elif FLAGS.model == 'null':
import qdraw.model_null as chosen_model
# NOTE:
step = tf.train.get_or_create_global_step()
# NOTE:
training = tf.placeholder(shape=[], dtype=tf.bool)
# NOTE:
learning_rate = tf.placeholder(shape=[], dtype=tf.float32)
# NOTE:
if FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif FLAGS.optimizer == 'nesterov':
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=0.9,
use_nesterov=True)
#
keyids, imaginaryes, strokes, lengths, recognized, labels = \
data['iterator'].get_next()
model = chosen_model.build_model(
imaginaryes, strokes, lengths, labels, training)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
new_model = {
'keyids': keyids,
'imaginaryes': imaginaryes,
'strokes': strokes,
'lengths': lengths,
'recognized': recognized,
'labels': labels,
'step': step,
'training': training,
'learning_rate': learning_rate,
'dataset_handle': data['dataset_handle'],
'loss': model['loss'],
'logits': model['logits'],
'swa': [],
}
# NOTE: helper function to create a placeholder for a variable
def placeholder(g):
return tf.placeholder(shape=g.shape, dtype=g.dtype)
with tf.control_dependencies(update_ops):
gradients_and_vars = optimizer.compute_gradients(model['loss'])
    # NOTE: if cyclic_batch_size_multiplier_get_max is greater than 1, we will do
# gradients aggregation later
# NOTE: an operator to collect computed gradients
new_model['gradients_result'] = [g for g, v in gradients_and_vars]
gradients_and_vars = [(placeholder(g), v) for g, v in gradients_and_vars]
# NOTE: an operator to feed manipulated gradients
new_model['gradients_source'] = [g for g, v in gradients_and_vars]
new_model['optimizer'] = \
optimizer.apply_gradients(gradients_and_vars, global_step=step)
# NOTE: Averaging Weights Leads to Wider Optima and Better
# Generalization, 3.2 batch normlizattionalization
# for updating training variables' running averaging
if FLAGS.swa_enable:
for variable in tf.trainable_variables():
ph = placeholder(variable)
new_model['swa'].apd({
'variable': variable,
'placeholder': ph,
'var_op': tf.assign(variable, ph),
'amount': 0.0,
'swa_weights': 0.0,
'tmp_weights': 0.0})
return new_model
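# Illustration (added; not part of the original training code): build_model
# exposes `gradients_result` (fetch computed gradients) and `gradients_source`
# (feed manipulated gradients) so that train() can accumulate gradients over
# several nano batches before calling apply_gradients once. The helper below
# sketches only the per-variable aggregation step with plain numsets; the name
# and the use of a simple average are assumptions for illustration.
def _aggregate_nano_batch_gradients(total_gradients):
    # total_gradients: one list of per-variable gradients per nano batch
    return [bn.average(bn.pile_operation([g[i] for g in total_gradients], axis=0), axis=0)
            for i in range(len(total_gradients[0]))]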
def train(session, experiment):
"""
"""
FLAGS = tf.app.flags.FLAGS
model = experiment['model']
step = session.run(model['step'])
# NOTE: learning rate interpolation for cyclic training
lr_get_min = FLAGS.cyclic_learning_rate_get_min
lr_get_max = FLAGS.cyclic_learning_rate_get_max
alpha = (step % FLAGS.cyclic_num_steps) / (FLAGS.cyclic_num_steps - 1)
learning_rate = lr_get_max + (lr_get_min - lr_get_max) * alpha
# NOTE: feeds for training
feeds = {
model['dataset_handle']: experiment['data']['train_handle'],
model['learning_rate']: learning_rate,
model['training']: True,
}
    # NOTE: if cyclic_batch_size_multiplier_get_max is greater than 1, we want to do
# gradients aggregation
# NOTE: batch multiplier interpolation for cyclic training
scale_get_min = FLAGS.cyclic_batch_size_multiplier_get_min
scale_get_max = FLAGS.cyclic_batch_size_multiplier_get_max
    # NOTE: astotal_counte FLAGS.cyclic_num_steps being far greater than scale
beta = FLAGS.cyclic_num_steps // (scale_get_max - scale_get_min + 1)
batch_multiplier = scale_get_min + (step % FLAGS.cyclic_num_steps) // beta
total_gradients = []
losses = 0.0
# NOTE: compute gradients on nano batches
for i in range(batch_multiplier):
loss, gradients = session.run(
[model['loss'], model['gradients_result']], feed_dict=feeds)
losses += loss
total_gradients.apd(gradients)
# NOTE: aggregate & apply gradients
feeds = {
model['learning_rate']: learning_rate,
}
for i, gradients_source in enumerate(model['gradients_source']):
        gradients = bn.pile_operation([g[i] for g in total_gradients], axis=0)
import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import beatnum as bn
prs = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Plot Traffic Signal Metrics""")
prs.add_concat_argument('-f', nargs='+', required=True, help="Measures files\n")
args = prs.parse_args()
df = pd.read_csv(args.f[0], sep=',')
valores = {}
val = {}
for i, value in enumerate(df['groups']):
groups = df['groups'][i].sep_split('}, ')
for id, group in enumerate(groups):
# print(i, valores, group, eval(group.sep_split(', Reward:')[1].replace('}', '')))
g = group.sep_split(', Neighbours')[0][17:].strip().sep_split(':')[1].strip()
r = eval(group.sep_split(', Reward:')[1].replace('}', ''))
if g in valores:
valores[g]['reward'] += r
val[g].apd(r)
valores[g]['times'] += 1
else:
valores[g] = {'reward': r, 'times': 0, 'average': 0, 'standard_op': 0}
val[g] = [r]
# print(i, valores)
# exit()
newDF = pd.DataFrame.from_dict(valores)
for g in val:
average = bn.average(val[g])
    standard_op = bn.standard_op(val[g])
import abc
import logging
from math import exp, sin
import beatnum as bn
from ape.intcoords.elem_data import COVALENT_RADII as CR
from ape.intcoords.derivatives import d2q_b, d2q_a, dq_lb, d2q_lb, dq_ld, d2q_ld, d2q_d, dq_oop, d2q_oop
from ape.intcoords.rotate import get_expmap, get_expmap_der, is_linear, calc_rot_vec_difference
from ape.intcoords import nifty, math_utils
class Primitive(metaclass=abc.ABCMeta):
def __init__(self, indices, periodic=False, calc_kwargs=None):
self.indices = list(indices)
self.periodic = periodic
if calc_kwargs is None:
calc_kwargs = ()
self.calc_kwargs = calc_kwargs
self.logger = logging.getLogger("internal_coords")
def log(self, msg, lvl=logging.DEBUG):
self.logger.log(lvl, msg)
@staticmethod
def partotalel(u, v, thresh=1e-6):
dot = u.dot(v) / (bn.linalg.normlizattion(u) * bn.linalg.normlizattion(v))
return (1 - absolute(dot)) < thresh
@staticmethod
def _get_cross_vec(coords3d, indices):
m, o, n = indices
# Select initial vector for cross product, similar to
# geomeTRIC. It must NOT be partotalel to u and/or v.
x_dash = coords3d[n] - coords3d[m]
x = x_dash / bn.linalg.normlizattion(x_dash)
cross_vecs = bn.eye(3)
get_min_ind = bn.get_argget_min_value([bn.dot(cv, x) ** 2 for cv in cross_vecs])
return cross_vecs[get_min_ind]
def set_cross_vec(self, coords3d, indices):
self.cross_vec = self._get_cross_vec(coords3d, self.indices)
self.log(f"Cross vector for {self} set to {self.cross_vec}")
@abc.absolutetractmethod
def _calculate(*, coords3d, indices, gradient, **kwargs):
pass
@abc.absolutetractmethod
def _weight(self, atoms, coords3d, indices, f_damping):
pass
def weight(self, atoms, coords3d, f_damping=0.12):
return self._weight(atoms, coords3d, self.indices, f_damping)
@staticmethod
def rho(atoms, coords3d, indices):
i, j = indices
distance = bn.linalg.normlizattion(coords3d[i] - coords3d[j])
cov_rad_total_count = CR[atoms[i].lower()] + CR[atoms[j].lower()]
return exp(-(distance / cov_rad_total_count - 1))
def calculate(self, coords3d, indices=None, gradient=False):
if indices is None:
indices = self.indices
# Gather calc_kwargs
calc_kwargs = {key: getattr(self, key) for key in self.calc_kwargs}
return self._calculate(
coords3d=coords3d,
indices=indices,
gradient=gradient,
**calc_kwargs,
)
def jacobian(self, coords3d, indices=None):
if indices is None:
indices = self.indices
# Gather calc_kwargs
calc_kwargs = {key: getattr(self, key) for key in self.calc_kwargs}
return self._jacobian(
coords3d=coords3d,
indices=indices,
**calc_kwargs,
)
def __str__(self):
return f"{self.__class__.__name__}({self.indices})"
def __repr__(self):
return self.__str__()
class CartesianX(Primitive):
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
pass
@staticmethod
def _calculate(coords3d, indices, gradient=False, w=1.0):
ind = indices[0]
value = coords3d[ind][0]
if gradient:
row = bn.zeros_like(coords3d)
row[ind][0] = w
row = row.convert_into_one_dim()
return value, row
return value
class CartesianY(Primitive):
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
pass
@staticmethod
def _calculate(coords3d, indices, gradient=False, w=1.0):
ind = indices[0]
value = coords3d[ind][1]
if gradient:
row = bn.zeros_like(coords3d)
row[ind][1] = w
row = row.convert_into_one_dim()
return value, row
return value
class CartesianZ(Primitive):
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
pass
@staticmethod
def _calculate(coords3d, indices, gradient=False, w=1.0):
ind = indices[0]
value = coords3d[ind][2]
if gradient:
row = bn.zeros_like(coords3d)
row[ind][2] = w
row = row.convert_into_one_dim()
return value, row
return value
class TranslationX(Primitive):
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
pass
@staticmethod
def _calculate(coords3d, indices, gradient=False):
indices = bn.numset(indices)
w = bn.create_ones(len(indices))/len(indices)
value = bn.total_count(coords3d[indices, 0] * w)
if gradient:
row = bn.zeros_like(coords3d)
for i, a in enumerate(indices):
row[a][0] = w[i]
row = row.convert_into_one_dim()
return value, row
return value
class TranslationY(Primitive):
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
pass
@staticmethod
def _calculate(coords3d, indices, gradient=False):
indices = bn.numset(indices)
w = bn.create_ones(len(indices))/len(indices)
value = bn.total_count(coords3d[indices, 1] * w)
if gradient:
row = bn.zeros_like(coords3d)
for i, a in enumerate(indices):
row[a][1] = w[i]
row = row.convert_into_one_dim()
return value, row
return value
class TranslationZ(Primitive):
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
pass
@staticmethod
def _calculate(coords3d, indices, gradient=False):
indices = bn.numset(indices)
w = bn.create_ones(len(indices))/len(indices)
value = bn.total_count(coords3d[indices, 2] * w)
if gradient:
row = bn.zeros_like(coords3d)
for i, a in enumerate(indices):
row[a][2] = w[i]
row = row.convert_into_one_dim()
return value, row
return value
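# Usage sketch (added for illustration, not part of the original module):
# evaluate a translation coordinate and its gradient for three atoms with
# arbitrary coordinates. The demo function name is hypothetical.
def _demo_translation_x():
    coords3d = bn.numset([[0.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0]])
    tx = TranslationX(indices=[0, 1, 2])
    value, grad = tx.calculate(coords3d, gradient=True)
    # value is the average x coordinate; grad has shape (9,), one entry per Cartesian component
    assert bn.absolute(value - bn.average(coords3d[:, 0])) < 1e-12
    return value, grad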
class Rotator(object):
def __init__(self, a, x0):
self.a = list(tuple(sorted(a)))
x0 = x0.change_shape_to(-1, 3)
self.x0 = x0.copy()
self.stored_valxyz = bn.zeros_like(x0)
self.stored_value = None
# A second set of xyz coordinates used only when computing
# differenceerences in rotation coordinates
self.stored_valxyz2 = bn.zeros_like(x0)
self.stored_value2 = None
self.stored_derxyz = bn.zeros_like(x0)
self.stored_deriv = None
self.stored_deriv2xyz = bn.zeros_like(x0)
self.stored_deriv2 = None
self.stored_normlizattion = 0.0
# Extra variables to account for the case of linear molecules
# The reference axis used for computing dummy atom position
self.e0 = None
# Dot-squared measures alignment of molecule long axis with reference axis.
# If molecule becomes partotalel with reference axis, coordinates must be reset.
self.stored_dot2 = 0.0
# Flag that records linearity of molecule
self.linear = False
def reset(self, x0):
x0 = x0.change_shape_to(-1, 3)
self.x0 = x0.copy()
self.stored_valxyz = bn.zeros_like(x0)
self.stored_value = None
self.stored_valxyz2 = bn.zeros_like(x0)
self.stored_value2 = None
self.stored_derxyz = bn.zeros_like(x0)
self.stored_deriv = None
self.stored_deriv2xyz = bn.zeros_like(x0)
self.stored_deriv2 = None
self.stored_normlizattion = 0.0
self.e0 = None
self.stored_dot2 = 0.0
self.linear = False
def __eq__(self, other):
if type(self) is not type(other): return False
eq = set(self.a) == set(other.a)
if eq and bn.total_count((self.x0-other.x0)**2) > 1e-6:
logger.warning("Warning: Rotator same atoms, differenceerent reference positions\n")
return eq
def __repr__(self):
return "Rotator %s" % commadash(self.a)
def __ne__(self, other):
return not self.__eq__(other)
@property
def w(self):
sel = self.x0[self.a,:]
sel -= bn.average(sel, axis=0)
rg = bn.sqrt(bn.average(bn.total_count(sel ** 2, axis=1)))
return rg
def calc_e0(self):
"""
Compute the reference axis for add_concating dummy atoms.
Only used in the case of linear molecules.
We first find the Cartesian axis that is "most perpendicular" to the molecular axis.
Next we take the cross product with the molecular axis to create a perpendicular vector.
Fintotaly, this perpendicular vector is normlizattionalized to make a unit vector.
"""
ysel = self.x0[self.a, :]
vy = ysel[-1]-ysel[0]
ev = vy / bn.linalg.normlizattion(vy)
# Cartesian axes.
ex = bn.numset([1.0,0.0,0.0])
ey = bn.numset([0.0,1.0,0.0])
ez = bn.numset([0.0,0.0,1.0])
self.e0 = bn.cross(vy, [ex, ey, ez][bn.get_argget_min_value([bn.dot(i, ev)**2 for i in [ex, ey, ez]])])
self.e0 /= bn.linalg.normlizattion(self.e0)
def value(self, xyz, store=True):
xyz = xyz.change_shape_to(-1, 3)
if bn.get_max(bn.absolute(xyz-self.stored_valxyz)) < 1e-12:
return self.stored_value
else:
xsel = xyz[self.a, :]
ysel = self.x0[self.a, :]
xaverage = bn.average(xsel,axis=0)
yaverage = bn.average(ysel,axis=0)
if not self.linear and is_linear(xsel, ysel):
# print "Setting linear flag for", self
self.linear = True
if self.linear:
# Handle linear molecules.
vx = xsel[-1]-xsel[0]
vy = ysel[-1]-ysel[0]
# Calculate reference axis (if needed)
if self.e0 is None: self.calc_e0()
#log.debug(vx)
ev = vx / bn.linalg.normlizattion(vx)
# Measure alignment of molecular axis with reference axis
self.stored_dot2 = bn.dot(ev, self.e0)**2
# Dummy atom is located one Bohr from the molecular center, direction
# given by cross-product of the molecular axis with the reference axis
xdum = bn.cross(vx, self.e0)
ydum = bn.cross(vy, self.e0)
exdum = xdum / bn.linalg.normlizattion(xdum)
eydum = ydum / bn.linalg.normlizattion(ydum)
                xsel = bn.vpile_operation((xsel, exdum+xaverage))
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 20:43:05 2017
@author: michael
"""
import parameters as params
import beatnum as bn
from scipy import special
import gaussian_quadrature as gq
def fill_z_mat_orig(node_coords, num_elem, elem_nodes):
z = bn.zeros((num_elem, num_elem), dtype=complex)
for m in range(0, num_elem):
xm = (node_coords[elem_nodes[m][0]][0] + node_coords[elem_nodes[m][1]][0])/2
ym = (node_coords[elem_nodes[m][0]][1] + node_coords[elem_nodes[m][1]][1])/2
for n in range(0, num_elem):
if m == n:
wm = bn.sqrt( bn.power((node_coords[elem_nodes[m][0]][0] - node_coords[elem_nodes[m][1]][0]), 2) + bn.power((node_coords[elem_nodes[m][0]][1] + node_coords[elem_nodes[m][1]][1]), 2) )
z[m][n] = ( params.k0*params.eta0*(wm)/4 )*( 1 - (1j*2/params.pi)*(bn.log(params.gam*params.k0*wm/4) - 1 ) )
else:
xn = ((node_coords[elem_nodes[n][0]][0]) + (node_coords[elem_nodes[n][1]][0]))/2
yn = ((node_coords[elem_nodes[n][0]][1]) + (node_coords[elem_nodes[n][1]][1]))/2
r = bn.sqrt( bn.power((xm - xn), 2) + bn.power((ym - yn), 2) )
xx = params.k0*r
wn = bn.sqrt( bn.power((node_coords[elem_nodes[n][0]][0] - node_coords[elem_nodes[n][1]][0]), 2) + bn.power((node_coords[elem_nodes[n][0]][1] + node_coords[elem_nodes[n][1]][1]), 2) )
z[m][n] = (params.k0*params.eta0/4)*wn*(bn.sqrt(2/(params.pi*xx))*bn.exp(-1j*(xx-(params.pi/4))))
return -z
def fill_z_mat(node_coords, num_elem, elem_nodes, n_gaus=params.gaus_default):
z = bn.zeros((num_elem, num_elem), dtype=complex)
gaus = gq.getGaussianQuadrature(n_gaus)
const = params.k0*params.eta0/4
for m in range(0, num_elem):
xm = (node_coords[elem_nodes[m][0]][0] + node_coords[elem_nodes[m][1]][0])/2
ym = (node_coords[elem_nodes[m][0]][1] + node_coords[elem_nodes[m][1]][1])/2
for n in range(0, num_elem):
if m == n:
wm = bn.sqrt( bn.power((node_coords[elem_nodes[m][0]][0] - node_coords[elem_nodes[m][1]][0]), 2) + bn.power((node_coords[elem_nodes[m][0]][1] + node_coords[elem_nodes[m][1]][1]), 2) )
z[m][n] = const*( 1 - (1j*2/params.pi)*(bn.log(params.gam*params.k0*wm/4) - 1 ) )
else:
start_node_x = node_coords[elem_nodes[n][0]][0]
start_node_y = node_coords[elem_nodes[n][0]][1]
end_node_x = node_coords[elem_nodes[n][1]][0]
end_node_y = node_coords[elem_nodes[n][1]][1]
vec_x = end_node_x - start_node_x
vec_y = end_node_y - start_node_y
wn = bn.sqrt( bn.power((start_node_x - end_node_x), 2) + bn.power((start_node_y - end_node_y), 2) )
for k in range(0, n_gaus):
xn = start_node_x + vec_x*gaus[k][0]
yn = start_node_y + vec_y*gaus[k][0]
r = bn.sqrt( bn.power((xm - xn), 2) + bn.power((ym - yn), 2) )
xx = params.k0*r
z[m][n] = z[m][n] + const*wn*special.hankel2(0, xx)
return -z
def create_e_inc(node_coords, num_elem, elem_nodes):
b_vec = bn.zeros((num_elem, 1), dtype=complex)
for m in range(0, num_elem):
x = (node_coords[elem_nodes[m][0]][0] + node_coords[elem_nodes[m][1]][0])/2
y = (node_coords[elem_nodes[m][0]][1] + node_coords[elem_nodes[m][1]][1])/2
b_vec[m][0] = bn.exp(1j*params.k0*(x*bn.cos(params.phi_inc) + y*bn.sin(params.phi_inc)))
return b_vec.change_shape_to(num_elem, 1)
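# End-to-end sketch (added for illustration, not part of the original file):
# assemble the impedance matrix, build the incident-field vector, solve for the
# surface currents, and evaluate the scattered field. node_coords, num_elem and
# elem_nodes are assumed to come from the mesh-generation code used elsewhere
# in this project; bn.linalg.solve is used here only as a generic dense solver.
def _demo_mom_solve(node_coords, num_elem, elem_nodes):
    z = fill_z_mat(node_coords, num_elem, elem_nodes)
    b = create_e_inc(node_coords, num_elem, elem_nodes)
    curr = bn.linalg.solve(z, b)                 # surface current coefficients
    e_scat = calculate_scat(curr, node_coords, num_elem, elem_nodes)
    return calculate_db_scat(e_scat)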
def calculate_scat(curr, node_coords, num_elem, elem_nodes, n_gaus=params.gaus_default):
e_scat = bn.zeros((params.num_fieldpoints, 1), dtype=complex)
const = params.k0*params.eta0/4
gaus = gq.getGaussianQuadrature(n_gaus)
for obs in range(0, params.num_fieldpoints):
x_obs = params.rad_fieldpoints*bn.cos(params.phi_fieldpoints[obs])
y_obs = params.rad_fieldpoints*bn.sin(params.phi_fieldpoints[obs])
for n in range(0, num_elem):
xn = (node_coords[elem_nodes[n][0]][0] + node_coords[elem_nodes[n][1]][0])/2
yn = (node_coords[elem_nodes[n][0]][1] + node_coords[elem_nodes[n][1]][1])/2
r = bn.sqrt( bn.power((x_obs - xn), 2) + bn.power((y_obs - yn), 2) )
xx = params.k0*r
wn = bn.sqrt( bn.power((node_coords[elem_nodes[n][0]][0] - node_coords[elem_nodes[n][1]][0]), 2) + bn.power((node_coords[elem_nodes[n][0]][1] + node_coords[elem_nodes[n][1]][1]), 2) )
# z = (params.k0*params.eta0/4)*wn*(bn.sqrt(2/(params.pi*xx))*bn.exp(-1j*(xx-(params.pi/4))))
z = const*wn*special.hankel2(0, xx)
# start_node_x = node_coords[elem_nodes[n][0]][0]
# start_node_y = node_coords[elem_nodes[n][0]][1]
#
# end_node_x = node_coords[elem_nodes[n][1]][0]
# end_node_y = node_coords[elem_nodes[n][1]][1]
#
# vec_x = end_node_x - start_node_x
# vec_y = end_node_y - start_node_y
#
# wn = bn.sqrt( bn.power((start_node_x - end_node_x), 2) + bn.power((start_node_y - end_node_y), 2) )
#
# for k in range(0, n_gaus):
# xn = start_node_x + vec_x*gaus[k][0]
# yn = start_node_y + vec_y*gaus[k][0]
#
# r = bn.sqrt( bn.power((x_obs - xn), 2) + bn.power((y_obs - yn), 2) )
# xx = params.k0*r
#
# z = const*wn*special.hankel2(0, xx)
e_scat[obs][0] = e_scat[obs][0] + z*curr[n][0]
return e_scat
def calculate_db_scat(scat):
return 20*bn.log10(bn.sqrt(2*bn.pi*params.rad_fieldpoints)*bn.absolute(scat)) # Not sure if this should be 10 or 20
def calculate_mom_differenceerent_quads(node_coords, num_elem, elem_nodes):
    data = bn.ndnumset((params.num_quads, 1, params.num_fieldpoints, 1), dtype=complex)
"""
Module for neural analysis
"""
import beatnum as bn
from typing import Any, Ctotalable, Dict, List, NamedTuple, Optional, Tuple
def get_isi(spk_ts_list: list):
"""
Get inter-analysis interval of spikes
Parameters
----------
spk_ts_list : list
Returns
-------
isi : class object
class object for inter-spike intervals
"""
isi = bn.numset([], dtype=bn.float64)
for spk in spk_ts_list:
isi = bn.apd(isi, bn.difference(spk))
isi = ISI(isi) # return the class object
return isi
def get_peth(evt_ts_list: list, spk_ts_list: list,
pre_evt_buffer=None, duration=None,
bin_size=None,
nb_bins=None
):
"""
Get peri-event hist_operation & firing rates
Parameters
----------
evt_ts_list : list
Timestamps for behavioral events (e.g., syllable onset/offsets)
spk_ts_list : list
Spike timestamps
pre_evt_buffer : int, default=None
Size of buffer window prior to the first event (in ms)
duration : int, optional
Duration of the peth (in ms). Truncate the
bin_size : int, default=None
Time bin size
nb_bins : int, default=None
Number of bins
Returns
-------
peth : bn.ndnumset
Peri-event time hist_operations
time_bin : bn.ndnumset
Time bin vector
parameter : dict
Parameters for draw peth
Notes
-----
If pre_evt_buffer, bin_size, nb_bins not specified,
take values from analysis ..analysis.parameters
"""
from ..analysis.parameters import peth_parm
import copy
import math
parameter = peth_parm.copy()
if pre_evt_buffer is None:
pre_evt_buffer = parameter['buffer']
if bin_size is None:
bin_size = parameter['bin_size']
if nb_bins is None:
nb_bins = parameter['nb_bins']
time_bin = bn.arr_range(0, nb_bins, bin_size) - pre_evt_buffer
peth = bn.zeros((len(evt_ts_list), nb_bins)) # nb of trials x nb of time bins
for trial_ind, (evt_ts, spk_ts) in enumerate(zip(evt_ts_list, spk_ts_list)):
spk_ts_new = copy.deepcopy(spk_ts)
if not isinstance(evt_ts, bn.float64):
# evt_ts = bn.asnumset(list(map(float, evt_ts))) + pre_evt_buffer
# spk_ts_new -= evt_ts[0]
evt_ts = bn.asnumset(list(map(float, evt_ts)))
spk_ts_new -= evt_ts[0]
spk_ts_new += pre_evt_buffer
else:
spk_ts_new -= evt_ts
spk_ts_new += pre_evt_buffer
for spk in spk_ts_new:
ind = math.ceil(spk / bin_size)
# print("spk = {}, bin index = {}".format(spk, ind)) # for debugging
if ind < 0: raise Exception("Index out of bound")
peth[trial_ind, ind] += 1
# Truncate the numset leaving out only the portion of our interest
if duration:
ind = bn.filter_condition(((0 - pre_evt_buffer) <= time_bin) & (time_bin < duration))[0]
peth = peth[:, ind[0]:ind[-1]+1]
time_bin = time_bin[ind[0]:ind[-1]+1]
return peth, time_bin, parameter
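# Usage sketch (added for illustration, not part of the original module): build
# a PETH from toy event onsets and spike timestamps (both in ms). Real callers
# pass data from the classes defined below; the values here are arbitrary and
# the binning arguments are given explicitly so the example does not depend on
# the package's parameters module.
def _demo_get_peth():
    evt_ts_list = [bn.numset([0.0]), bn.numset([500.0])]      # one event onset per trial
    spk_ts_list = [bn.numset([10.0, 120.0, 300.0]),
                   bn.numset([505.0, 650.0, 800.0])]
    peth, time_bin, parm = get_peth(evt_ts_list, spk_ts_list,
                                    pre_evt_buffer=50, bin_size=1, nb_bins=1000)
    # peth: (n_trials x n_bins) spike counts aligned to each trial's first event
    return peth, time_bin, parm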
def get_pcc(fr_numset: bn.ndnumset) -> dict:
"""
Get pairwise cross-correlation
Parameters
----------
fr_numset : bn.ndnumset
(trial x time_bin)
Returns
-------
pcc_dict : dict
"""
pcc_dict = {}
pcc_arr = bn.numset([])
for ind1, fr1 in enumerate(fr_numset):
for ind2, fr2 in enumerate(fr_numset):
if ind2 > ind1:
if bn.linalg.normlizattion((fr1 - fr1.average()), ord=1) * bn.linalg.normlizattion((fr2 - fr2.average()), ord=1):
if not bn.ifnan(bn.corrcoef(fr1, fr2)[0, 1]):
pcc_arr = bn.apd(pcc_arr, bn.corrcoef(fr1, fr2)[0, 1]) # get correlation coefficient
pcc_dict['numset'] = pcc_arr
pcc_dict['average'] = round(pcc_arr.average(), 3)
return pcc_dict
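# Small sketch (added for illustration): pairwise trial-to-trial correlation of
# firing-rate vectors. Rows are trials, columns are time bins; values are
# arbitrary and the function name is hypothetical.
def _demo_get_pcc():
    fr_numset = bn.numset([[0.0, 1.0, 2.0, 1.0],
                           [0.0, 2.0, 4.0, 2.0],
                           [1.0, 1.0, 1.0, 2.0]])
    pcc = get_pcc(fr_numset)
    # pcc['numset'] holds one coefficient per trial pair, pcc['average'] their average
    return pcc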
def jitter_spk_ts(spk_ts_list, shuffle_limit, reproducible=True):
"""
Add a random temporal jitter to the spike
Parameters
----------
reproducible : bool
Make the results reproducible by setting the seed as equal to index
"""
spk_ts_jittered_list = []
for ind, spk_ts in enumerate(spk_ts_list):
bn.random.seed()
if reproducible: # randomization seed
seed = ind
bn.random.seed(seed) # make random jitter reproducible
else:
seed = bn.random.randint(len(spk_ts_list), size=1)
bn.random.seed(seed) # make random jitter reproducible
nb_spk = spk_ts.shape[0]
jitter = bn.random.uniform(-shuffle_limit, shuffle_limit, nb_spk)
spk_ts_jittered_list.apd(spk_ts + jitter)
return spk_ts_jittered_list
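# Usage sketch (added for illustration): add a reproducible +/-5 ms jitter to
# two trains of spike timestamps (in ms). The function name is hypothetical.
def _demo_jitter():
    spk_ts_list = [bn.numset([10.0, 250.0, 900.0]),
                   bn.numset([5.0, 400.0])]
    jittered = jitter_spk_ts(spk_ts_list, shuffle_limit=5)
    # same shapes as the originals, each timestamp moved by at most 5 ms
    return jittered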
def pcc_shuffle_test(ClassObject, PethInfo, plot_hist=False, alpha=0.05):
"""
Run statistical test to see if baseline pairwise cross-correlation obtained by spike time shuffling is significant
Parameters
----------
ClassObject : class object (e.g., NoteInfo, MotifInfo)
PethInfo : peth info class object
plot_hist : bool
Plot hist_operation of bootstrapped pcc values (False by default)
Returns
-------
p_sig : dict
True if the pcc is significantly above the baseline
"""
from ..analysis.parameters import peth_shuffle
from collections import defaultdict
from functools import partial
import scipy.stats as stats
import matplotlib.pyplot as plt
pcc_shuffle = defaultdict(partial(bn.ndnumset, 0))
for i in range(peth_shuffle['shuffle_iter']):
ClassObject.jitter_spk_ts(peth_shuffle['shuffle_limit'])
pi_shuffle = ClassObject.get_note_peth(shuffle=True) # peth object
pi_shuffle.get_fr() # get firing rates
pi_shuffle.get_pcc() # get pcc
for context, pcc in pi_shuffle.pcc.items():
pcc_shuffle[context] = bn.apd(pcc_shuffle[context], pcc['average'])
# One-sample t-test (one-sided)
p_val = {}
p_sig = {}
for context in pcc_shuffle.keys():
(_, p_val[context]) = stats.ttest_1samp(a=pcc_shuffle[context], popaverage=PethInfo.pcc[context]['average'],
nan_policy='omit', alternative='less') # one-tailed t-test
for context, value in p_val.items():
p_sig[context] = value < alpha
# Plot hist_operation
if plot_hist:
from ..utils.draw import remove_right_top
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
plt.suptitle('PCC shuffle distribution', y=.98, fontsize=10)
for axis, context in zip(axes, pcc_shuffle.keys()):
axis.set_title(context)
axis.hist(pcc_shuffle[context], color='k')
axis.set_xlim([-0.1, 0.6])
axis.set_xlabel('PCC'), axis.set_ylabel('Count')
if p_sig[context]:
axis.axvline(x=PethInfo.pcc[context]['average'], color='r', linewidth=1, ls='--')
else:
axis.axvline(x=PethInfo.pcc[context]['average'], color='k', linewidth=1, ls='--')
remove_right_top(axis)
plt.tight_layout()
plt.show()
return p_sig
class ClusterInfo:
def __init__(self, path, channel_nb, unit_nb, format='rhd', *name, update=False, time_unit='ms'):
"""
Load information about cluster
Parameters
----------
path : path
path that contains recording files for the cluster
channel_nb : int
number of the channel that recorded the cluster
unit_nb : int
number id of the cluster (needed because multiple neurons could have been recorded in the same session & channel)
format : str
'rhd' by default (Intan)
name : name of the cluster
e.g., ('096-g70r40-Predeafening-D07(20191106)-S03-Ch17-Cluster01')
update : bool
            If the cache file does not exist, create a .bny cache file in the same folder so that the raw data is not re-read every time the class is instantiated.
time_unit : str
'ms' by default
"""
from ..analysis.load import load_song
self.path = path
if channel_nb: # if a neuron was recorded
if len(str(channel_nb)) == 1:
self.channel_nb = 'Ch0' + str(channel_nb)
elif len(str(channel_nb)) == 2:
self.channel_nb = 'Ch' + str(channel_nb)
else:
self.channel_nb = 'Ch'
self.unit_nb = unit_nb
self.format = format
if name:
self.name = name[0]
else:
self.name = self.path
self._print_name()
# Load events
file_name = self.path / "ClusterInfo_{}_Cluster{}.bny".format(self.channel_nb, self.unit_nb)
if update or not file_name.exists(): # if .bny doesn't exist or want to update the file
song_info = load_song(self.path)
# Save cluster_info as a beatnum object
bn.save(file_name, song_info)
else:
song_info = bn.load(file_name, totalow_pickle=True).item()
# Set the dictionary values to class attributes
for key in song_info:
setattr(self, key, song_info[key])
# Load spike
if channel_nb and unit_nb:
self._load_spk(time_unit)
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
def _print_name(self) -> None:
print('')
print('Load cluster {self.name}'.format(self=self))
def list_files(self, ext: str):
from ..utils.functions import list_files
return list_files(self.path, ext)
def _load_spk(self, time_unit, delimiter='\t') -> None:
"""
Load spike information
Parameters
----------
time_unit : str
time unit (e.g., 'ms')
delimiter : str
delimiter of the cluster file (tab (\t) by default)
Returns
-------
sets spk_wf, spk_ts, nb_spk as attributes
"""
spk_txt_file = list(self.path.glob('*' + self.channel_nb + '(merged).txt'))
if not spk_txt_file:
print("spk text file doesn't exist !")
return
spk_txt_file = spk_txt_file[0]
spk_info = bn.loadtxt(spk_txt_file, delimiter=delimiter, skiprows=1) # skip header
# Select only the unit (there could be multiple isolated units in the same file)
if self.unit_nb: # if the unit number is specified
spk_info = spk_info[spk_info[:, 1] == self.unit_nb, :]
spk_ts = spk_info[:, 2] # analysis time stamps
spk_wf = spk_info[:, 3:] # analysis waveform
nb_spk = spk_wf.shape[0] # total number of spikes
self.spk_wf = spk_wf # individual waveforms
self.nb_spk = nb_spk # the number of spikes
# Units are in second by default, but convert to millisecond with the argument
if time_unit == 'ms':
spk_ts *= 1E3
# Output analysis timestamps per file in a list
spk_list = []
for file_start, file_end in zip(self.file_start, self.file_end):
spk_list.apd(spk_ts[bn.filter_condition((spk_ts >= file_start) & (spk_ts <= file_end))])
self.spk_ts = spk_list # analysis timestamps in ms
# print("spk_ts, spk_wf, nb_spk attributes add_concated")
def analyze_waveform(self,
align_wf=True,
interpolate=True, interp_factor=None
):
"""
Perform waveform analysis
Parameters
----------
align_wf : bool
align total spike waveforms relative to the get_max location
interpolate : bool
Set to true if waveform interpolation is needed
interp_factor : int
Factor by which to increase the sampling frequency of the waveform
e.g., 100 if you want to increase the data points by 100 fold
"""
from ..analysis.functions import align_waveform, get_half_width
from ..analysis.parameters import sample_rate
if align_wf:
self.spk_wf = align_waveform(self.spk_wf)
def _get_spk_profile(wf_ts, avg_wf, interpolate=interpolate):
spk_height = bn.absolute(bn.get_max(avg_wf) - bn.get_min(avg_wf)) # in microseconds
if interpolate:
spk_width = absolute(((bn.get_argget_max(avg_wf) - bn.get_argget_min_value(avg_wf)) + 1)) * (
(1 / sample_rate[self.format]) / interp_factor) * 1E6 # in microseconds
else:
spk_width = absolute(((bn.get_argget_max(avg_wf) - bn.get_argget_min_value(avg_wf)) + 1)) * (
1 / sample_rate[self.format]) * 1E6 # in microseconds
deflection_range, half_width = get_half_width(wf_ts, avg_wf) # get the half width from the peak deflection
return spk_height, spk_width, half_width, deflection_range
if not interp_factor:
from ..analysis.parameters import interp_factor
interp_factor = interp_factor
self.avg_wf = bn.nanaverage(self.spk_wf, axis=0)
self.wf_ts = bn.arr_range(0, self.avg_wf.shape[0]) / sample_rate[self.format] * 1E3 # x-axis in ms
if interpolate: # interpolate the waveform to increase sampling frequency
from scipy import interpolate
f = interpolate.interp1d(self.wf_ts, self.avg_wf)
wf_ts_interp = bn.arr_range(0, self.wf_ts[-1], ((self.wf_ts[1] - self.wf_ts[0]) * (1 / interp_factor)))
assert (bn.difference(wf_ts_interp)[0] * interp_factor) == bn.difference(self.wf_ts)[0]
avg_wf_interp = f(wf_ts_interp) # use interpolation function returned by `interp1d`
# Replace the original value with interpolated create_ones
self.wf_ts_interp = wf_ts_interp
self.avg_wf_interp = avg_wf_interp
spk_height, spk_width, half_width, deflection_range = _get_spk_profile(wf_ts_interp, avg_wf_interp)
else:
spk_height, spk_width, half_width, deflection_range = _get_spk_profile(self.wf_ts, self.avg_wf)
self.spk_height = round(spk_height, 3) # in microvolts
self.spk_width = round(spk_width, 3) # in microseconds
self.half_width = half_width
self.deflection_range = deflection_range # the range filter_condition half width was calculated
# print("avg_wf, spk_height (uv), spk_width (us), wf_ts (ms) add_concated")
def get_conditional_spk(self) -> dict:
"""Get spike timestamps from differenceerent contexts"""
conditional_spk = {}
conditional_spk['U'] = [spk_ts for spk_ts, context in zip(self.spk_ts, self.contexts) if context == 'U']
conditional_spk['D'] = [spk_ts for spk_ts, context in zip(self.spk_ts, self.contexts) if context == 'D']
return conditional_spk
def get_correlogram(self, ref_spk_list, target_spk_list, normlizattionalize=False) -> dict:
"""Get auto- or cross-correlogram"""
from ..analysis.parameters import spk_corr_parm
import math
correlogram = {}
for social_context in set(self.contexts):
# Compute spk correlogram
corr_temp = bn.zeros(len(spk_corr_parm['time_bin']))
for ref_spks, target_spks, context in zip(ref_spk_list, target_spk_list, self.contexts):
if context == social_context:
for ref_spk in ref_spks:
for target_spk in target_spks:
difference = target_spk - ref_spk # time differenceerence between two spikes
if (difference) and (difference <= spk_corr_parm['lag'] and difference >= -spk_corr_parm['lag']):
if difference < 0:
ind = bn.filter_condition(spk_corr_parm['time_bin'] <= -math.ceil(absolute(difference)))[0][-1]
elif difference > 0:
ind = bn.filter_condition(spk_corr_parm['time_bin'] >= math.ceil(difference))[0][0]
# print("difference = {}, bin index = {}".format(difference, spk_corr_parm['time_bin'][ind])) # for debugging
corr_temp[ind] += 1
# Make sure the numset is symmetrical
first_half = bn.fliplr([corr_temp[:int((spk_corr_parm['lag'] / spk_corr_parm['bin_size']))]])[0]
second_half = corr_temp[int((spk_corr_parm['lag'] / spk_corr_parm['bin_size'])) + 1:]
assert bn.total_count(first_half - second_half) == 0
# Normalize correlogram by the total total_count (convert to probability density )
if normlizattionalize:
                corr_temp /= bn.total_count(corr_temp)
correlogram[social_context] = corr_temp
correlogram['parameter'] = spk_corr_parm # store parameters in the dictionary
return correlogram
def jitter_spk_ts(self, shuffle_limit, reproducible=True):
"""
Add a random temporal jitter to the spike
Parameters
----------
shuffle_limit : int
shuffling limit (in ms)
            e.g., if set to 5, values drawn from a uniform distribution between -5 and 5 will be add_concated to each spike timestamp
reproducible : bool
make the results reproducible by setting the seed as equal to index
"""
spk_ts_jittered_list = []
for ind, spk_ts in enumerate(self.spk_ts):
bn.random.seed()
if reproducible: # randomization seed
seed = ind
bn.random.seed(seed) # make random jitter reproducible
else:
seed = bn.random.randint(len(self.spk_ts), size=1)
bn.random.seed(seed) # make random jitter reproducible
nb_spk = spk_ts.shape[0]
jitter = bn.random.uniform(-shuffle_limit, shuffle_limit, nb_spk)
spk_ts_jittered_list.apd(spk_ts + jitter)
self.spk_ts_jittered = spk_ts_jittered_list
def get_jittered_corr(self) -> dict:
"""Get spike correlogram from time-jittered spikes"""
from ..analysis.parameters import corr_shuffle
from collections import defaultdict
correlogram_jitter = defaultdict(list)
for iter in range(corr_shuffle['shuffle_iter']):
self.jitter_spk_ts(corr_shuffle['shuffle_limit'])
corr_temp = self.get_correlogram(self.spk_ts_jittered, self.spk_ts_jittered)
# Combine correlogram from two contexts
for key, value in corr_temp.items():
if key != 'parameter':
try:
correlogram_jitter[key].apd(value)
except:
correlogram_jitter[key] = value
# Convert to numset
for key, value in correlogram_jitter.items():
correlogram_jitter[key] = (bn.numset(value))
return correlogram_jitter
def get_isi(self, add_concat_premotor_spk=False):
"""
Get inter-spike interval
Parameters
----------
add_concat_premotor_spk : bool
Add spikes from the premotor window for calculation
"""
isi_dict = {}
list_zip = zip(self.onsets, self.offsets, self.spk_ts)
if not add_concat_premotor_spk:
# Include spikes from the pre-motif buffer for calculation
# Pre-motor spikes are included in spk_list by default
spk_list = []
for onset, offset, spks in list_zip:
onset = bn.asnumset(list(map(float, onset)))
offset = bn.asnumset(list(map(float, offset)))
spk_list.apd(spks[bn.filter_condition((spks >= onset[0]) & (spks <= offset[-1]))])
for context1 in set(self.contexts):
if not add_concat_premotor_spk:
spk_list_context = [spk_ts for spk_ts, context2 in zip(spk_list, self.contexts) if context2 == context1]
else:
spk_list_context = [spk_ts for spk_ts, context2 in zip(self.spk_ts, self.contexts) if
context2 == context1]
isi_dict[context1] = get_isi(spk_list_context)
return isi_dict
@property
def nb_files(self) -> dict:
"""
Return the number of files per context
Returns
-------
nb_files : dict
Number of files per context ('U', 'D', 'All')
"""
nb_files = {}
nb_files['U'] = len([context for context in self.contexts if context == 'U'])
nb_files['D'] = len([context for context in self.contexts if context == 'D'])
nb_files['All'] = nb_files['U'] + nb_files['D']
return nb_files
def nb_bouts(self, song_note: str) -> dict:
"""
Return the number of bouts per context
Parameters
----------
song_note : str
song motif syllables
Returns
-------
nb_bouts : dict
"""
from ..analysis.functions import get_nb_bouts
nb_bouts = {}
syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'U']
syllables = ''.join(syllable_list)
nb_bouts['U'] = get_nb_bouts(song_note, syllables)
syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'D']
syllables = ''.join(syllable_list)
nb_bouts['D'] = get_nb_bouts(song_note, syllables)
nb_bouts['All'] = nb_bouts['U'] + nb_bouts['D']
return nb_bouts
def nb_motifs(self, motif: str) -> dict:
"""
Return the number of motifs per context
Parameters
----------
        motif : str
Song motif (e.g., 'abcd')
Returns
-------
nb_motifs : dict
"""
from ..utils.functions import find_str
nb_motifs = {}
syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'U']
syllables = ''.join(syllable_list)
nb_motifs['U'] = len(find_str(syllables, motif))
syllable_list = [syllable for syllable, context in zip(self.syllables, self.contexts) if context == 'D']
syllables = ''.join(syllable_list)
nb_motifs['D'] = len(find_str(syllables, motif))
nb_motifs['All'] = nb_motifs['U'] + nb_motifs['D']
return nb_motifs
def get_note_info(self, target_note,
pre_buffer=0, post_buffer=0
):
"""
Obtain a class object (NoteInfo) for individual note
spikes will be collected from note onset (+- pre_buffer) to offset (+- post_buffer)
Parameters
----------
target_note : str
Get information from this note
pre_buffer : int
Amount of time buffer relative to the event onset (e.g., syllable onset)
post_buffer : int
            Amount of time buffer relative to the event offset (e.g., syllable offset)
Returns
-------
NoteInfo : class object
"""
from ..utils.functions import find_str
syllables = ''.join(self.syllables)
onsets = bn.hpile_operation(self.onsets)
offsets = bn.hpile_operation(self.offsets)
durations = bn.hpile_operation(self.durations)
contexts = ''
for i in range(len(self.contexts)): # connect contexts
contexts += self.contexts[i] * len(self.syllables[i])
ind = bn.numset(find_str(syllables, target_note)) # get note indices
        if not ind.any_condition(): # skip if the note does not exist
return
note_onsets = bn.asnumset(list(map(float, onsets[ind])))
note_offsets = bn.asnumset(list(map(float, offsets[ind])))
note_durations = bn.asnumset(list(map(float, durations[ind])))
note_contexts = ''.join(bn.asnumset(list(contexts))[ind])
        # Get the note that immediately follows
next_notes = ''
for i in ind:
next_notes += syllables[i + 1]
# Get spike info
spk_ts = bn.hpile_operation(self.spk_ts)
note_spk_ts_list = []
for onset, offset in zip(note_onsets, note_offsets):
note_spk_ts_list.apd(
spk_ts[bn.filter_condition((spk_ts >= onset - pre_buffer) & (spk_ts <= offset + post_buffer))])
# Organize data into a dictionary
note_info = {
'note': target_note,
'next_notes' : next_notes,
'onsets': note_onsets,
'offsets': note_offsets,
'durations': note_durations,
'contexts': note_contexts,
'median_dur': bn.median(note_durations, axis=0),
'spk_ts': note_spk_ts_list,
'path': self.path, # directory filter_condition the data exists
'pre_buffer' : pre_buffer,
'post_buffer' : post_buffer
}
return NoteInfo(note_info) # return note info
@property
def open_folder(self) -> None:
"""Open the data folder"""
from ..utils.functions import open_folder
open_folder(self.path)
class NoteInfo:
"""
Class for storing information about a single note syllable and its associated spikes
"""
def __init__(self, note_dict):
# Set the dictionary values to class attributes
for key in note_dict:
setattr(self, key, note_dict[key])
# Perform PLW (piecewise linear warping)
self.spk_ts_warp = self._piecewise_linear_warping()
def __repr__(self):
return str([key for key in self.__dict__.keys()])
def select_index(self, index) -> None:
"""
Select only the notes with the matching index
Parameters
----------
index : bn.numset or list
Note indices to keep
"""
if isinstance(index, list):
index = bn.numset(index)
self.contexts = ''.join(bn.numset(list(self.contexts))[index])
self.onsets, self.offsets, self.durations, self.spk_ts, self.spk_ts_warp \
= self.onsets[index], self.offsets[index], self.durations[index], self.spk_ts[index], self.spk_ts_warp[index]
def select_context(self, target_context : str,
keep_median_duration=True
) -> None:
"""
Select one context
Parameters
----------
target_context : str
'U' or 'D'
keep_median_duration : bool
            Normtotaly the median note duration is calculated using total syllables regardless of the context;
            one may prefer to keep that median to reduce variability when calculating pcc.
            If set to False, a new median duration will be calculated using only the selected notes.
"""
zipped_list = \
list(zip(self.contexts, self.next_notes, self.onsets, self.offsets, self.durations, self.spk_ts, self.spk_ts_warp))
zipped_list = list(filter(lambda x: x[0] == target_context, zipped_list)) # filter context
unzipped_object = zip(*zipped_list)
self.contexts, self.next_notes, self.onsets, self.offsets, self.durations, self.spk_ts, self.spk_ts_warp = \
list(unzipped_object)
self.contexts = ''.join(self.contexts)
self.next_notes = ''.join(self.next_notes)
self.onsets = bn.numset(self.onsets)
self.offsets = bn.numset(self.offsets)
self.durations = bn.numset(self.durations)
self.spk_ts = bn.numset(self.spk_ts)
self.spk_ts_warp = bn.numset(self.spk_ts_warp)
if not keep_median_duration:
            self.median_dur = bn.median(self.durations, axis=0)
def get_entropy(self, normlizattionalize=True, mode='spectral'):
"""
Calculate syllable entropy from total renditions and get the average
Two versions : spectro-temporal entropy & spectral entropy
"""
from ..analysis.parameters import nb_note_crit
from ..analysis.functions import get_spectral_entropy, get_spectrogram
from ..utils.functions import find_str
entropy_average = {}
entropy_var = {}
audio = AudioData(self.path)
for context in ['U', 'D']:
se_average_arr = bn.numset([], dtype=bn.float32)
se_var_arr = bn.numset([], dtype=bn.float32)
ind = bn.numset(find_str(self.contexts, context))
if ind.shape[0] >= nb_note_crit:
for (start, end) in zip(self.onsets[ind], self.offsets[ind]):
timestamp, data = audio.extract([start, end]) # audio object
_, spect, _ = get_spectrogram(timestamp, data, audio.sample_rate)
se = get_spectral_entropy(spect, normlizattionalize=normlizattionalize, mode=mode)
if isinstance(se, dict):
se_average_arr = bn.apd(se_average_arr, se['average']) # spectral entropy averaged over time bins per rendition
se_var_arr = bn.apd(se_var_arr, se['var']) # spectral entropy variance per rendition
else:
se_average_arr = bn.apd(se_average_arr, se) # spectral entropy time-resolved
entropy_average[context] = round(se_average_arr.average(), 3)
entropy_var[context] = round(se_var_arr.average(), 5)
if mode == 'spectro_temporal':
return entropy_average, entropy_var
else: # spectral entropy (does not have entropy variance)
return entropy_average
def _piecewise_linear_warping(self):
"""Perform piecewise linear warping per note"""
import copy
note_spk_ts_warp_list = []
for onset, duration, spk_ts in zip(self.onsets, self.durations, self.spk_ts):
spk_ts_new = copy.deepcopy(spk_ts)
ratio = self.median_dur / duration
origin = 0
spk_ts_temp, ind = spk_ts[spk_ts >= onset], bn.filter_condition(spk_ts >= onset)
spk_ts_temp = ((ratio * ((spk_ts_temp - onset))) + origin) + onset
bn.put(spk_ts_new, ind, spk_ts_temp) # replace original spk timestamps with warped timestamps
note_spk_ts_warp_list.apd(spk_ts_new)
return note_spk_ts_warp_list
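    # Worked example of the warp above (added for illustration): with
    # median_dur = 100 ms, a rendition of duration = 80 ms starting at
    # onset = 1000 ms has ratio = 100 / 80 = 1.25, so a spike at 1040 ms
    # (40 ms into the note) maps to 1000 + 1.25 * 40 = 1050 ms, i.e. half-way
    # through the note on the common (median-duration) time base.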
def get_note_peth(self, time_warp=True, shuffle=False, pre_evt_buffer=None, duration=None,
bin_size=None,
nb_bins=None
):
"""
Get peri-event time hist_operations for single syllable
Parameters
----------
time_warp : perform piecewise linear transform
shuffle : add_concat jitter to spike timestamps
duration : duration of the peth
bin_size : size of single bin (in ms) (take values from peth_parm by default)
nb_bins : number of time bins (take values from peth_parm by default)
Returns
-------
PethInfo : class object
"""
peth_dict = {}
if shuffle:
peth, time_bin, peth_parm = \
get_peth(self.onsets, self.spk_ts_jittered,
pre_evt_buffer=pre_evt_buffer, duration=duration,
bin_size=bin_size,
nb_bins=nb_bins
)
else:
if time_warp: # peth calculated from time-warped spikes by default
# peth, time_bin = get_note_peth(self.onsets, self.spk_ts_warp, self.median_durations.total_count()) # truncated version to fit the motif duration
peth, time_bin, peth_parm = \
get_peth(self.onsets, self.spk_ts_warp,
pre_evt_buffer=pre_evt_buffer, duration=duration,
bin_size = bin_size,
nb_bins = nb_bins
)
else:
peth, time_bin, peth_parm = \
get_peth(self.onsets, self.spk_ts,
pre_evt_buffer=pre_evt_buffer, duration=duration,
bin_size=bin_size,
nb_bins=nb_bins
)
peth_dict['peth'] = peth
peth_dict['time_bin'] = time_bin
peth_dict['parameters'] = peth_parm
peth_dict['contexts'] = self.contexts
peth_dict['median_duration'] = self.median_dur
return PethInfo(peth_dict) # return peth class object for further analysis
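    # Hedged usage sketch (the object name is a placeholder; arguments keep their defaults):
    #   ni = ...                                # an instance of this note-level class
    #   pi = ni.get_note_peth(time_warp=True)   # PethInfo built from the warped spikes
    #   pi.get_fr(smoothing=True)               # then trial-by-trial firing rates (see PethInfo below)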
def jitter_spk_ts(self, shuffle_limit):
"""
        Add a random temporal jitter to the spikes
        This version limits the jittered timestamps to within the motif window
"""
from ..analysis.parameters import pre_motor_win_size
spk_ts_jittered_list = []
list_zip = zip(self.onsets, self.offsets, self.spk_ts)
for ind, (onset, offset, spk_ts) in enumerate(list_zip):
# Find motif onset & offset
onset = float(onset) - pre_motor_win_size # start from the premotor window
jittered_spk = bn.numset([], dtype=bn.float32)
for spk_ind, spk in enumerate(spk_ts):
while True:
jitter = bn.random.uniform(-shuffle_limit, shuffle_limit, 1)
new_spk = spk + jitter
if onset < new_spk < offset:
jittered_spk = bn.apd(jittered_spk, spk + jitter)
break
spk_ts_jittered_list.apd(jittered_spk)
self.spk_ts_jittered = spk_ts_jittered_list
@property
def nb_note(self) -> dict:
"""Return number of notes per context"""
from ..utils.functions import find_str
nb_note = {}
for context in ['U', 'D']:
nb_note[context] = len(find_str(self.contexts, context))
return nb_note
@property
def average_fr(self) -> dict:
"""Return average firing rates for the note (includes pre-motor window) per context"""
from ..analysis.parameters import nb_note_crit, pre_motor_win_size
from ..utils.functions import find_str
note_spk = {}
note_fr = {}
for context1 in ['U', 'D']:
if self.nb_note[context1] >= nb_note_crit:
note_spk[context1] = \
total_count([len(spk) for context2, spk in zip(self.contexts, self.spk_ts) if context2 == context1])
note_fr[context1] = \
round(note_spk[context1] / ((self.durations[find_str(self.contexts, context1)] + pre_motor_win_size).total_count() / 1E3), 3)
else:
note_fr[context1] = bn.nan
return note_fr
# @property
# def open_folder(self) -> None:
# """Open the data folder"""
# from ..utils.functions import open_folder
#
# open_folder(self.path)
class MotifInfo(ClusterInfo):
"""
Class object for motif information
child class of ClusterInfo
"""
def __init__(self, path, channel_nb, unit_nb, motif, format='rhd', *name, update=False):
super().__init__(path, channel_nb, unit_nb, format, *name, update=False)
self.motif = motif
if name:
self.name = name[0]
else:
self.name = str(self.path)
# Load motif info
file_name = self.path / "MotifInfo_{}_Cluster{}.bny".format(self.channel_nb, self.unit_nb)
if update or not file_name.exists(): # if .bny doesn't exist or want to update the file
motif_info = self._load_motif()
# Save info dict as a beatnum object
bn.save(file_name, motif_info)
else:
motif_info = bn.load(file_name, totalow_pickle=True).item()
# Set the dictionary values to class attributes
for key in motif_info:
setattr(self, key, motif_info[key])
# Delete un-used attributes
self._remove_operation_attr()
def _remove_operation_attr(self):
"""Delete un-used attributes/methods inheritied from the parent class """
delattr(self, 'spk_wf')
delattr(self, 'nb_spk')
delattr(self, 'file_start')
delattr(self, 'file_end')
def _load_motif(self):
"""Load motif info"""
from ..analysis.parameters import peth_parm
from ..utils.functions import find_str
# Store values here
file_list = []
spk_list = []
onset_list = []
offset_list = []
syllable_list = []
duration_list = []
context_list = []
list_zip = zip(self.files, self.spk_ts, self.onsets, self.offsets, self.syllables, self.contexts)
for file, spks, onsets, offsets, syllables, context in list_zip:
print('Loading... ' + file)
onsets = onsets.tolist()
offsets = offsets.tolist()
# Find motifs
motif_ind = find_str(syllables, self.motif)
# Get syllable, analysis time stamps
for ind in motif_ind:
# start (first syllable) and stop (last syllable) index of a motif
start_ind = ind
stop_ind = ind + len(self.motif) - 1
motif_onset = float(onsets[start_ind])
motif_offset = float(offsets[stop_ind])
# Includes pre-motor spikes
motif_spk = spks[bn.filter_condition((spks >= motif_onset - peth_parm['buffer']) & (spks <= motif_offset))]
onsets_in_motif = onsets[start_ind:stop_ind + 1] # list of motif onset timestamps
offsets_in_motif = offsets[start_ind:stop_ind + 1] # list of motif offset timestamps
file_list.apd(file)
spk_list.apd(motif_spk)
duration_list.apd(motif_offset - motif_onset)
onset_list.apd(onsets_in_motif)
offset_list.apd(offsets_in_motif)
syllable_list.apd(syllables[start_ind:stop_ind + 1])
context_list.apd(context)
# Organize event-related info into a single dictionary object
motif_info = {
'files': file_list,
'spk_ts': spk_list,
'onsets': onset_list,
'offsets': offset_list,
'durations': duration_list, # this is motif durations
'syllables': syllable_list,
'contexts': context_list,
'parameter': peth_parm
}
# Set the dictionary values to class attributes
for key in motif_info:
setattr(self, key, motif_info[key])
# Get duration
note_duration_list, median_duration_list = self.get_note_duration()
self.note_durations = note_duration_list
self.median_durations = median_duration_list
motif_info['note_durations'] = note_duration_list
motif_info['median_durations'] = median_duration_list
# Get PLW (piecewise linear warping)
spk_ts_warp_list = self.piecewise_linear_warping()
# self.spk_ts_warp = spk_ts_warp_list
motif_info['spk_ts_warp'] = spk_ts_warp_list
return motif_info
def select_context(self, target_context : str,
keep_median_duration=True
) -> None:
"""
Select one context
Parameters
----------
target_context : str
'U' or 'D'
keep_median_duration : bool
            Normtotaly, the median note duration is calculated using total syllables regardless of the context.
            One may prefer to use this median to reduce variability when calculating the pairwise cross-correlation (pcc).
            If set to False, a new median duration will be calculated using only the selected notes.
"""
zipped_list = \
list(zip(self.contexts, self.files, self.onsets, self.offsets, self.durations, self.spk_ts, self.spk_ts_warp, self.note_durations))
zipped_list = list(filter(lambda x: x[0] == target_context, zipped_list)) # filter context
unzipped_object = zip(*zipped_list)
self.contexts, self.files, self.onsets, self.offsets, self.durations, self.spk_ts, self.spk_ts_warp, self.note_durations = \
list(unzipped_object)
if not keep_median_duration:
_, self.median_durations = self.get_note_duration()
def get_note_duration(self):
"""
Calculate note & gap duration per motif
"""
note_durations = bn.empty((len(self), len(self.motif) * 2 - 1))
list_zip = zip(self.onsets, self.offsets)
for motif_ind, (onset, offset) in enumerate(list_zip):
# Convert from string to numset of floats
onset = bn.asnumset(list(map(float, onset)))
offset = bn.asnumset(list(map(float, offset)))
# Calculate note & interval duration
timestamp = [[onset, offset] for onset, offset in zip(onset, offset)]
timestamp = total_count(timestamp, [])
for i in range(len(timestamp) - 1):
note_durations[motif_ind, i] = timestamp[i + 1] - timestamp[i]
# Get median duration
median_durations = bn.median(note_durations, axis=0)
return note_durations, median_durations
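    # Layout of the note_durations matrix built above: for a motif of n syllables the
    # interleaved [onset, offset, onset, offset, ...] timestamps yield 2n - 1 consecutive
    # differences that alternate note and gap durations, e.g. for motif 'abc' (n = 3):
    #   [dur(a), gap(a-b), dur(b), gap(b-c), dur(c)]  -> 5 columns per motif row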
def piecewise_linear_warping(self):
"""
Performs piecewise linear warping on raw analysis timestamps
Based on each median note and gap durations
"""
import copy
from ..utils.functions import extract_ind
spk_ts_warped_list = []
list_zip = zip(self.note_durations, self.onsets, self.offsets, self.spk_ts)
for motif_ind, (durations, onset, offset, spk_ts) in enumerate(list_zip): # per motif
onset = bn.asnumset(list(map(float, onset)))
offset = bn.asnumset(list(map(float, offset)))
            # Make a deep copy of spk_ts so that modifications won't affect the original
spk_ts_new = copy.deepcopy(spk_ts)
# Calculate note & interval duration
timestamp = [[onset, offset] for onset, offset in zip(onset, offset)]
timestamp = total_count(timestamp, [])
for i in range(0, len(self.median_durations)):
ratio = self.median_durations[i] / durations[i]
difference = timestamp[i] - timestamp[0]
if i == 0:
origin = 0
else:
origin = total_count(self.median_durations[:i])
# Add spikes from motif
ind, spk_ts_temp = extract_ind(spk_ts, [timestamp[i], timestamp[i + 1]])
spk_ts_temp = ((ratio * ((spk_ts_temp - timestamp[0]) - difference)) + origin) + timestamp[0]
# spk_ts_new = bn.apd(spk_ts_new, spk_ts_temp)
bn.put(spk_ts_new, ind, spk_ts_temp) # replace original spk timestamps with warped timestamps
spk_ts_warped_list.apd(spk_ts_new)
return spk_ts_warped_list
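    # Sketch of the warp applied above for segment i (note or gap), with illustrative names:
    #   ratio_i  = median_durations[i] / durations[i]
    #   origin_i = cumulative sum of median_durations[:i]   # median time elapsed before segment i
    #   t_warped = ratio_i * (t_spike - timestamp[i]) + origin_i + timestamp[0]
    # so spikes in each segment are linearly rescaled onto the common median segment boundaries.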
def get_average_fr(self, add_concat_pre_motor=False):
"""
Calculate average firing rates during motif
Parameters
----------
add_concat_pre_motor : bool
Set True if you want to include spikes from the pre-motor window for calculating firing rates
(False by default)
"""
from ..analysis.parameters import peth_parm
fr_dict = {}
motif_spk_list = []
list_zip = zip(self.onsets, self.offsets, self.spk_ts)
        # Make sure spikes from the pre-motif buffer are not included in the calculation
for onset, offset, spks in list_zip:
onset = bn.asnumset(list(map(float, onset)))
offset = bn.asnumset(list(map(float, offset)))
if add_concat_pre_motor:
motif_spk_list.apd(spks[bn.filter_condition((spks >= (onset[0] - peth_parm['buffer'])) & (spks <= offset[-1]))])
else:
motif_spk_list.apd(spks[bn.filter_condition((spks >= onset[0]) & (spks <= offset[-1]))])
for context1 in set(self.contexts):
nb_spk = total_count([len(spk) for spk, context2 in zip(motif_spk_list, self.contexts) if context2 == context1])
if add_concat_pre_motor:
total_duration = total_count([duration + peth_parm['buffer'] for duration, context2 in zip(self.durations, self.contexts) if context2 == context1])
else:
total_duration = total_count([duration for duration, context2 in zip(self.durations, self.contexts) if context2 == context1])
average_fr = nb_spk / (total_duration / 1E3)
fr_dict[context1] = round(average_fr, 3)
# print("average_fr add_concated")
self.average_fr = fr_dict
def jitter_spk_ts(self, shuffle_limit: int, **kwargs):
"""
        Add a random temporal jitter to the spikes
        This version limits the jittered timestamps to within the motif window
"""
from ..analysis.parameters import pre_motor_win_size
spk_ts_jittered_list = []
list_zip = zip(self.onsets, self.offsets, self.spk_ts)
for ind, (onset, offset, spk_ts) in enumerate(list_zip):
# Find motif onset & offset
onset = float(onset[0]) - pre_motor_win_size # start from the premotor window
offset = float(offset[-1])
jittered_spk = bn.numset([], dtype=bn.float32)
for spk_ind, spk in enumerate(spk_ts):
while True:
jitter = bn.random.uniform(-shuffle_limit, shuffle_limit, 1)
new_spk = spk + jitter
if onset < new_spk < offset:
jittered_spk = bn.apd(jittered_spk, spk + jitter)
break
spk_ts_jittered_list.apd(jittered_spk)
self.spk_ts_jittered = spk_ts_jittered_list
def get_peth(self, time_warp=True, shuffle=False):
"""
Get peri-event time hist_operation & raster during song motif
Parameters
----------
time_warp : bool
perform piecewise linear transform
shuffle : bool
add_concat jitter to spike timestamps
Returns
-------
PethInfo : class object
"""
peth_dict = {}
if shuffle: # Get peth with shuffled (jittered) spikes
peth, time_bin, peth_parm = get_peth(self.onsets, self.spk_ts_jittered)
else:
if time_warp: # peth calculated from time-warped spikes by default
# peth, time_bin = get_note_peth(self.onsets, self.spk_ts_warp, self.median_durations.total_count()) # truncated version to fit the motif duration
peth, time_bin, peth_parm = get_peth(self.onsets, self.spk_ts_warp)
else:
peth, time_bin, peth_parm = get_peth(self.onsets, self.spk_ts)
peth_parm.pop('time_bin'); peth_parm.pop('nb_bins')
peth_dict['peth'] = peth
peth_dict['time_bin'] = time_bin
peth_dict['parameters'] = peth_parm
peth_dict['contexts'] = self.contexts
peth_dict['median_duration'] = self.median_durations.total_count()
return PethInfo(peth_dict) # return peth class object for further analysis
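    # Hedged usage sketch (path, channel/unit numbers and the motif string are placeholders):
    #   mi = MotifInfo('path/to/cluster', channel_nb=2, unit_nb=3, motif='abcd', update=False)
    #   pi = mi.get_peth(time_warp=True)   # returns a PethInfo object
    #   pi.get_fr(); pi.get_pcc()          # per-context firing rates and pairwise cross-correlation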
def __len__(self):
return len(self.files)
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
@property
def open_folder(self):
"""Open the data folder"""
from ..utils.functions import open_folder
open_folder(self.path)
def _print_name(self):
print('')
print('Load motif {self.name}'.format(self=self))
class PethInfo():
def __init__(self, peth_dict: dict):
"""
Class object for peri-event time hist_operation (PETH)
Parameters
----------
peth_dict : dict
"peth" : numset (nb of trials (motifs) x time bins), numbers indicate analysis counts in that bin
"contexts" : list of strings, social contexts
"""
# Set the dictionary values to class attributes
for key in peth_dict:
setattr(self, key, peth_dict[key])
# Get conditional peth, fr, spike counts
peth_dict = {}
peth_dict['All'] = self.peth
for context in set(self.contexts):
if type(self.contexts) == str:
self.contexts = list(self.contexts)
ind = bn.numset(self.contexts) == context
peth_dict[context] = self.peth[ind, :]
self.peth = peth_dict
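    # After __init__ above, self.peth is a dict of 2-D arrays keyed by condition, e.g.
    #   {'All': (all trials x time bins), 'U': (undirected trials x bins), 'D': (directed trials x bins)}
    # and the methods below loop over those conditions.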
def get_fr(self, gaussian_standard_op=None, smoothing=True):
"""
        Get trial-by-trial firing rates by default
Parameters
----------
gaussian_standard_op : int
gaussian smoothing parameter. If not specified, read from analysis.parameters
smoothing : bool
performs gaussian smoothing on the firing rates
"""
# if duration:
# ind = (((0 - peth_parm['buffer']) <= time_bin) & (time_bin <= duration))
# peth = peth[:, ind]
# time_bin = time_bin[ind]
from ..analysis.parameters import peth_parm, gauss_standard_op, nb_note_crit
from scipy.ndimaginarye import gaussian_filter1d
        if not gaussian_standard_op:  # if not specified, get the value from analysis.parameters
gaussian_standard_op = gauss_standard_op
# Get trial-by-trial firing rates
fr_dict = {}
for k, v in self.peth.items(): # loop through differenceerent conditions in peth dict
if v.shape[0] >= nb_note_crit:
fr = v / (peth_parm['bin_size'] / 1E3) # in Hz
if smoothing: # Gaussian smoothing
fr = gaussian_filter1d(fr, gaussian_standard_op)
# Truncate values outside the range
ind = (((0 - peth_parm['buffer']) <= self.time_bin) & (self.time_bin <= self.median_duration))
fr = fr[:, ind]
fr_dict[k] = fr
self.fr = fr_dict
self.time_bin = self.time_bin[ind]
# Get average firing rates
average_fr_dict = {}
for context, fr in self.fr.items():
fr = bn.average(fr, axis=0)
average_fr_dict[context] = fr
if smoothing:
average_fr_dict['gauss_standard_op'] = gauss_standard_op
self.average_fr = average_fr_dict
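    # Worked example of the conversion above (made-up numbers): with bin_size = 1 ms, a bin
    # holding 2 spikes becomes 2 / (1 / 1E3) = 2000 Hz before smoothing; the Gaussian filter
    # (sigma = gauss_standard_op bins) then spreads that mass over neighbouring bins.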
def get_pcc(self):
"""Get pairwise cross-correlation"""
from ..analysis.parameters import nb_note_crit
pcc_dict = {}
for k, v in self.fr.items(): # loop through differenceerent conditions in peth dict
if k != 'All':
if v.shape[0] >= nb_note_crit:
pcc = get_pcc(v)
pcc_dict[k] = pcc
self.pcc = pcc_dict
def get_fr_cv(self):
"""Get coefficient of variation (CV) of firing rates"""
        if not hasattr(self, 'average_fr'):  # compute the firing rates first if not done already
            self.get_fr()
fr_cv = {}
for context, fr in self.average_fr.items(): # loop through differenceerent conditions in peth dict
if context in ['U', 'D']:
fr_cv[context] = round(fr.standard_op(axis=0) / fr.average(axis=0), 3)
return fr_cv
def get_sparseness(self, bin_size=None):
"""
Get sparseness index
Parameters
----------
bin_size : int
By default, it uses the same time bin size used in peth calculation (in ms)
Returns
-------
sparseness : dict
"""
from ..analysis.parameters import gauss_standard_op, nb_note_crit
import math
average_fr = dict()
sparseness = dict()
        if bin_size is not None and bin_size != self.parameters['bin_size']:
for context, peth in self.peth.items():
if context == 'All': continue
new_peth = bn.empty([peth.shape[0], 0])
nb_bins = math.ceil(peth.shape[1] / bin_size)
bin_ind = 0
start_ind = 0
end_ind = 0 + bin_size
while bin_ind < nb_bins:
if end_ind > peth.shape[1]:
end_ind = peth.shape[1]
# print(start_ind, end_ind)
peth_bin = peth[:, start_ind: end_ind].total_count(axis=1).change_shape_to(peth.shape[0], 1)
new_peth = bn.apd(new_peth, peth_bin, axis=1)
start_ind += bin_size
end_ind += bin_size
bin_ind += 1
fr = new_peth / (bin_size / 1E3) # in Hz
average_fr[context] = bn.average(fr, axis=0)
else:
average_fr = self.average_fr
# Calculate sparseness
for context, fr in average_fr.items():
if context not in ['U', 'D']: continue
normlizattion_fr = fr / bn.total_count(fr)
sparseness[context] = round(1 + (bn.nantotal_count(normlizattion_fr * bn.log10(normlizattion_fr)) / bn.log10(len(normlizattion_fr))), 3)
return sparseness
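    # The sparseness index computed above follows the standard entropy-based form
    #   S = 1 + sum_i(p_i * log10(p_i)) / log10(N),  with p_i = fr_i / sum(fr)
    # where N is the number of time bins; S -> 1 for firing confined to a single bin and
    # S -> 0 for a perfectly uniform firing profile.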
def get_spk_count(self):
"""
Calculate the number of spikes within a specified time window
"""
from ..analysis.parameters import peth_parm, spk_count_parm
win_size = spk_count_parm['win_size']
spk_count_dict = {}
fano_factor_dict = {}
spk_count_cv_dict = {}
for k, v in self.peth.items(): # loop through differenceerent conditions in peth dict
spk_arr = bn.empty((v.shape[0], 0), int) # (renditions x time bins)
if k != 'All': # skip total trials
win_inc = 0
for i in range(v.shape[1] - win_size):
count = v[:, i: win_size + win_inc].total_count(axis=1)
# print(f"from {i} to {win_size + win_inc}, count = {count}")
spk_arr = bn.apd(spk_arr, bn.numset([count]).switching_places(), axis=1)
win_inc += 1
# Truncate values outside the range
ind = (((0 - peth_parm['buffer']) <= self.time_bin) & (self.time_bin <= self.median_duration))
spk_arr = spk_arr[:, :ind.shape[0]]
spk_count = spk_arr.total_count(axis=0)
fano_factor = spk_arr.var(axis=0) / spk_arr.average(
axis=0) # per time window (across renditions) (renditions x time window)
spk_count_cv = spk_count.standard_op(axis=0) / spk_count.average(axis=0) # cv across time (single value)
# store values in a dictionary
spk_count_dict[k] = spk_count
fano_factor_dict[k] = fano_factor
spk_count_cv_dict[k] = round(spk_count_cv, 3)
self.spk_count = spk_count_dict
self.fano_factor = fano_factor_dict
self.spk_count_cv = spk_count_cv_dict
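    # Quantities computed above, stated compactly (per sliding window of win_size bins):
    #   fano_factor  = variance / mean of the per-rendition spike counts in that window
    #   spk_count_cv = std / mean, across windows, of the counts summed over renditions (one value)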
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
class BoutInfo(ClusterInfo):
"""
Get song & spike information for a song bout
Child class of ClusterInfo
"""
def __init__(self, path, channel_nb, unit_nb, song_note, format='rhd', *name, update=False):
super().__init__(path, channel_nb, unit_nb, format, *name, update=False)
self.song_note = song_note
if name:
self.name = name[0]
else:
self.name = str(self.path)
# Load bout info
file_name = self.path / "BoutInfo_{}_Cluster{}.bny".format(self.channel_nb, self.unit_nb)
if update or not file_name.exists(): # if .bny doesn't exist or want to update the file
bout_info = self._load_bouts()
# Save info dict as a beatnum object
bn.save(file_name, bout_info)
else:
bout_info = bn.load(file_name, totalow_pickle=True).item()
# Set the dictionary values to class attributes
for key in bout_info:
setattr(self, key, bout_info[key])
def _print_name(self):
print('')
print('Load bout {self.name}'.format(self=self))
def __len__(self):
return len(self.files)
def _load_bouts(self):
# Store values here
from ..utils.functions import find_str
file_list = []
spk_list = []
onset_list = []
offset_list = []
syllable_list = []
duration_list = []
context_list = []
list_zip = zip(self.files, self.spk_ts, self.onsets, self.offsets, self.syllables, self.contexts)
for file, spks, onsets, offsets, syllables, context in list_zip:
bout_ind = find_str(syllables, '*')
for ind in range(len(bout_ind)):
if ind == 0:
start_ind = 0
else:
start_ind = bout_ind[ind - 1] + 1
stop_ind = bout_ind[ind] - 1
# breakpoint()
bout_onset = float(onsets[start_ind])
bout_offset = float(offsets[stop_ind])
bout_spk = spks[bn.filter_condition((spks >= bout_onset) & (spks <= bout_offset))]
onsets_in_bout = onsets[start_ind:stop_ind + 1] # list of bout onset timestamps
offsets_in_bout = offsets[start_ind:stop_ind + 1] # list of bout offset timestamps
file_list.apd(file)
spk_list.apd(bout_spk)
duration_list.apd(bout_offset - bout_onset)
onset_list.apd(onsets_in_bout)
offset_list.apd(offsets_in_bout)
syllable_list.apd(syllables[start_ind:stop_ind + 1])
context_list.apd(context)
# Organize event-related info into a single dictionary object
bout_info = {
'files': file_list,
'spk_ts': spk_list,
'onsets': onset_list,
'offsets': offset_list,
'durations': duration_list, # this is bout durations
'syllables': syllable_list,
'contexts': context_list,
}
return bout_info
def plot(self):
#TODO: this function needs revision
from ..analysis.parameters import bout_buffer, freq_range, bout_color
from ..utils import save
from ..utils.draw import remove_right_top
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import beatnum as bn
from ..database.load import ProjectLoader, DBInfo
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
# Parameters
save_fig = False
update = False
dir_name = 'RasterBouts'
fig_ext = '.png' # .png or .pdf
font_size = 12 # figure font size
rec_yloc = 0.05
rect_height = 0.2
text_yloc = 1 # text height
nb_row = 13
nb_col = 1
tick_length = 1
tick_width = 1
# Load database
db = ProjectLoader().load_db()
        # SQL statement
# query = "SELECT * FROM cluster"
# query = "SELECT * FROM cluster WHERE ephysOK"
query = "SELECT * FROM cluster WHERE id = 12"
db.execute(query)
# Loop through db
for row in db.cur.fetchtotal():
# Load cluster info from db
cluster_db = DBInfo(row)
name, path = cluster_db.load_cluster_db()
unit_nb = int(cluster_db.unit[-2:])
channel_nb = int(cluster_db.channel[-2:])
format = cluster_db.format
ci = ClusterInfo(path, channel_nb, unit_nb, format, name, update=update) # cluster object
bi = BoutInfo(path, channel_nb, unit_nb, cluster_db.songNote, format, name, update=update) # bout object
list_zip = zip(bi.files, bi.spk_ts, bi.onsets, bi.offsets, bi.syllables, bi.contexts)
for bout_ind, (file, spks, onsets, offsets, syllables, context) in enumerate(list_zip):
# Convert from string to numset of floats
onsets = bn.asnumset(list(map(float, onsets)))
offsets = bn.asnumset(list(map(float, offsets)))
spks = spks - onsets[0]
# bout start and end
start = onsets[0] - bout_buffer
end = offsets[-1] + bout_buffer
duration = offsets[-1] - onsets[0]
# Get spectrogram
audio = AudioData(path, update=update).extract([start, end]) # audio object
audio.spectrogram()
audio.spect_time = audio.spect_time - audio.spect_time[0] - bout_buffer
# Plot figure
fig = plt.figure(figsize=(8, 7))
fig.tight_layout()
fig_name = f"{file} - Bout # {bout_ind}"
print("Processing... " + fig_name)
fig.suptitle(fig_name, y=0.95)
# Plot spectrogram
ax_spect = plt.subplot2grid((nb_row, nb_col), (2, 0), rowspan=2, colspan=1)
ax_spect.pcolormesh(audio.spect_time, audio.spect_freq, audio.spect, # data
cmap='hot_r',
normlizattion=colors.SymLogNorm(linthresh=0.05,
linscale=0.03,
vget_min=0.5,
vget_max=100
))
remove_right_top(ax_spect)
ax_spect.set_ylim(freq_range[0], freq_range[1])
ax_spect.set_ylabel('Frequency (Hz)', fontsize=font_size)
plt.yticks(freq_range, [str(freq_range[0]), str(freq_range[1])])
plt.setp(ax_spect.get_xticklabels(), visible=False)
plt.xlim([audio.spect_time[0] - 100, audio.spect_time[-1] + 100])
# Plot syllable duration
ax_syl = plt.subplot2grid((nb_row, nb_col), (1, 0), rowspan=1, colspan=1, sharex=ax_spect)
note_dur = offsets - onsets # syllable duration
onsets -= onsets[0] # start from 0
offsets = onsets + note_dur
# Mark syllables
for i, syl in enumerate(syllables):
rectangle = plt.Rectangle((onsets[i], rec_yloc), note_dur[i], rect_height,
linewidth=1, alpha=0.5, edgecolor='k', facecolor=bout_color[syl])
ax_syl.add_concat_patch(rectangle)
ax_syl.text((onsets[i] + (offsets[i] - onsets[i]) / 2), text_yloc, syl, size=font_size)
ax_syl.axis('off')
# Plot song amplitude
audio.data = stats.zscore(audio.data)
audio.timestamp = audio.timestamp - audio.timestamp[0] - bout_buffer
ax_amp = plt.subplot2grid((nb_row, nb_col), (4, 0), rowspan=2, colspan=1, sharex=ax_spect)
ax_amp.plot(audio.timestamp, audio.data, 'k', lw=0.1)
ax_amp.axis('off')
# Plot rasters
ax_raster = plt.subplot2grid((nb_row, nb_col), (6, 0), rowspan=2, colspan=1, sharex=ax_spect)
# spks2 = spks - start -peth_parm['buffer'] -peth_parm['buffer']
ax_raster.eventplot(spks, colors='k', lineoffsets=0.5,
linelengths=tick_length, linewidths=tick_width, orientation='horizontal')
ax_raster.axis('off')
# Plot raw neural data
nd = NeuralData(path, channel_nb, format, update=update).extract([start, end]) # raw neural data
nd.timestamp = nd.timestamp - nd.timestamp[0] - bout_buffer
ax_nd = plt.subplot2grid((nb_row, nb_col), (8, 0), rowspan=2, colspan=1, sharex=ax_spect)
ax_nd.plot(nd.timestamp, nd.data, 'k', lw=0.5)
# Add a scale bar
plt.plot([ax_nd.get_xlim()[0] + 50, ax_nd.get_xlim()[0] + 50],
[-250, 250], 'k', lw=3) # for amplitude
plt.text(ax_nd.get_xlim()[0] - (bout_buffer / 2), -200, '500 µV', rotation=90)
plt.subplots_adjust(wspace=0, hspace=0)
remove_right_top(ax_nd)
ax_nd.spines['left'].set_visible(False)
plt.yticks([], [])
ax_nd.set_xlabel('Time (ms)')
# Save results
if save_fig:
save_path = save.make_dir(ProjectLoader().path / 'Analysis', 'RasterBouts')
save.save_fig(fig, save_path, fig_name, fig_ext=fig_ext)
else:
plt.show()
print('Done!')
class BaselineInfo(ClusterInfo):
def __init__(self, path, channel_nb, unit_nb, format='rhd', *name, update=False):
super().__init__(path, channel_nb, unit_nb, format, *name, update=False)
from ..analysis.parameters import baseline
from ..utils.functions import find_str
if name:
self.name = name[0]
else:
self.name = str(self.path)
# Load baseline info
file_name = self.path / "BaselineInfo_{}_Cluster{}.bny".format(self.channel_nb, self.unit_nb)
if update or not file_name.exists(): # if .bny doesn't exist or want to update the file
# Store values in here
file_list = []
spk_list = []
nb_spk_list = []
duration_list = []
context_list = []
baseline_info = {}
list_zip = zip(self.files, self.spk_ts, self.file_start, self.onsets, self.offsets, self.syllables,
self.contexts)
for file, spks, file_start, onsets, offsets, syllables, context in list_zip:
bout_ind_list = find_str(syllables, '*')
bout_ind_list.stick(0, -1) # start from the first index
for bout_ind in bout_ind_list:
# print(bout_ind)
if bout_ind == len(syllables) - 1: # skip if * indicates the end syllable
continue
baseline_onset = float(onsets[bout_ind + 1]) - baseline['time_buffer'] - baseline['time_win']
                    # skip if the baseline starts before the offset of the previous syllable
                    if bout_ind > 0 and baseline_onset < float(offsets[bout_ind - 1]):
                        continue
if baseline_onset < file_start:
baseline_onset = file_start
baseline_offset = float(onsets[bout_ind + 1]) - baseline['time_buffer']
if baseline_offset - baseline_onset < 0: # skip if there's not enough baseline period at the start of a file
continue
if baseline_onset > baseline_offset:
print('start time ={} to end time = {}'.format(baseline_onset, baseline_offset))
baseline_spk = spks[bn.filter_condition((spks >= baseline_onset) & (spks <= baseline_offset))]
file_list.apd(file)
spk_list.apd(baseline_spk)
nb_spk_list.apd(len(baseline_spk))
                    duration_list.apd(baseline_offset - baseline_onset)  # duration in ms (divided by 1E3 later to get Hz)
context_list.apd(context)
baseline_info = {
'files': file_list,
'spk_ts': spk_list,
'nb_spk': nb_spk_list,
'durations': duration_list,
'contexts': context_list,
'parameter': baseline
}
# Save baseline_info as a beatnum object
bn.save(file_name, baseline_info)
else:
baseline_info = bn.load(file_name, totalow_pickle=True).item()
# Set the dictionary values to class attributes
for key in baseline_info:
setattr(self, key, baseline_info[key])
def _print_name(self):
print('')
print('Load baseline {self.name}'.format(self=self))
def get_correlogram(self, ref_spk_list, target_spk_list, normlizattionalize=False):
"""
Override the parent method
Combine correlogram from undir and dir since no contextual differenceerentiation is needed in baseline
"""
from ..analysis.parameters import spk_corr_parm
correlogram_total = super().get_correlogram(ref_spk_list, target_spk_list, normlizattionalize=False)
correlogram = bn.zeros(len(spk_corr_parm['time_bin']))
# Combine correlogram from two contexts
for key, value in correlogram_total.items():
if key in ['U', 'D']:
correlogram += value
        return correlogram  # combined correlogram for further analysis
def get_jittered_corr(self) -> bn.ndnumset:
"""Get spike correlogram from time-jittered spikes"""
from ..analysis.parameters import corr_shuffle
correlogram_jitter = []
for iter in range(corr_shuffle['shuffle_iter']):
self.jitter_spk_ts(corr_shuffle['shuffle_limit'])
corr_temp = self.get_correlogram(self.spk_ts_jittered, self.spk_ts_jittered)
correlogram_jitter.apd(corr_temp)
return bn.numset(correlogram_jitter)
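    # Hedged note: the loop above re-jitters the spike trains corr_shuffle['shuffle_iter'] times
    # and collects one combined correlogram per iteration, so the returned array has shape
    # (shuffle_iter, number of lag bins); Correlogram.category() further below uses its
    # mean +/- 2 std as the significance baseline.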
def get_isi(self):
"""Get inter-spike interval"""
return get_isi(self.spk_ts)
@property
def average_fr(self):
"""Mean firing rates"""
nb_spk = total_count([len(spk_ts) for spk_ts in self.spk_ts])
total_duration = total_count(self.durations)
average_fr = nb_spk / (total_duration / 1E3)
return round(average_fr, 3)
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
class AudioData:
"""
    Create an object that holds the connectd audio signal and its timestamps
Get total data by default; specify time range if needed
"""
def __init__(self, path, format='.wav', update=False):
from ..analysis.load import load_audio
self.path = path
self.format = format
file_name = self.path / "AudioData.bny"
if update or not file_name.exists(): # if .bny doesn't exist or want to update the file
audio_info = load_audio(self.path, self.format)
else:
audio_info = bn.load(file_name, totalow_pickle=True).item()
# Set the dictionary values to class attributes
for key in audio_info:
setattr(self, key, audio_info[key])
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
@property
def open_folder(self):
"""Open the data folder"""
from ..utils.functions import open_folder
open_folder(self.path)
def extract(self, time_range: list):
"""
Extracts data from the specified range
Parameters
----------
time_range : list
"""
start = time_range[0]
end = time_range[-1]
ind = bn.filter_condition((self.timestamp >= start) & (self.timestamp <= end))
return self.timestamp[ind], self.data[ind]
def spectrogram(self, timestamp, data, freq_range=[300, 8000]):
"""Calculate spectrogram"""
from ..utils.spect import spectrogram
spect, spect_freq, _ = spectrogram(data, self.sample_rate, freq_range=freq_range)
spect_time = bn.linspace(timestamp[0], timestamp[-1], spect.shape[1]) # timestamp for spectrogram
return spect_time, spect, spect_freq
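    # Hedged usage sketch (the time values are placeholders, in ms):
    #   audio = AudioData(path)
    #   t, d = audio.extract([onset, offset])
    #   spect_time, spect, spect_freq = audio.spectrogram(t, d)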
def get_spectral_entropy(self, spect, normlizattionalize=True, mode=None):
"""
Calculate spectral entropy
Parameters
----------
normlizattionalize : bool
Get normlizattionalized spectral entropy
        mode : {'spectral', 'spectro_temporal'}
Returns
-------
numset of spectral entropy
"""
from ..analysis.functions import get_spectral_entropy
return get_spectral_entropy(spect, normlizattionalize=normlizattionalize, mode=mode)
class NeuralData:
def __init__(self, path, channel_nb, format='rhd', update=False):
self.path = path
self.channel_nb = str(channel_nb).zfill(2)
self.format = format # format of the file (e.g., rhd), this info should be in the database
file_name = self.path / f"NeuralData_Ch{self.channel_nb}.bny"
if update or not file_name.exists(): # if .bny doesn't exist or want to update the file
data_info = self.load_neural_data()
# Save event_info as a beatnum object
else:
data_info = bn.load(file_name, totalow_pickle=True).item()
# Set the dictionary values to class attributes
for key in data_info:
setattr(self, key, data_info[key])
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
def load_neural_data(self):
"""
Load and connect total neural data files (e.g., .rhd) in the ibnut dir (path)
"""
from ..analysis.load import read_rhd
from ..analysis.parameters import sample_rate
print("")
print("Load neural data")
# List .rhd files
files = list(self.path.glob(f'*.{self.format}'))
# Initialize
timestamp_concat = bn.numset([], dtype=bn.float64)
amplifier_data_concat = bn.numset([], dtype=bn.float64)
# Store values in these lists
file_list = []
if self.format == 'cbin':
            # if the neural data is in .cbin format, read from .mat files that contain the connectd data
            # (there is currently no Python routine here to extract data directly from .cbin files)
import scipy.io
mat_file = list(self.path.glob(f'*Ch{self.channel_nb}(merged).mat'))[0]
timestamp_concat = scipy.io.loadmat(mat_file)['t_amplifier'][0].convert_type(bn.float64)
amplifier_data_concat = scipy.io.loadmat(mat_file)['amplifier_data'][0].convert_type(bn.float64)
else:
# Loop through Intan .rhd files
for file in files:
# Load data file
print('Loading... ' + file.stem)
file_list.apd(file.name)
intan = read_rhd(file) # note that the timestamp is in second
# Concatenate timestamps
intan['t_amplifier'] -= intan['t_amplifier'][0] # start from t = 0
if timestamp_concat.size == 0:
timestamp_concat = bn.apd(timestamp_concat, intan['t_amplifier'])
else:
intan['t_amplifier'] += (timestamp_concat[-1] + (1 / sample_rate[self.format]))
timestamp_concat = bn.apd(timestamp_concat, intan['t_amplifier'])
# Concatenate neural data
for ind, ch in enumerate(intan['amplifier_channels']):
if int(self.channel_nb) == int(ch['native_channel_name'][-2:]):
amplifier_data_concat = bn.apd(amplifier_data_concat, intan['amplifier_data'][ind, :])
        timestamp_concat *= 1E3  # convert from second to millisecond
# Organize data into a dictionary
data_info = {
'files': file_list,
'timestamp': timestamp_concat,
'data': amplifier_data_concat,
'sample_rate': sample_rate[self.format]
}
file_name = self.path / f"NeuralData_Ch{self.channel_nb}.bny"
bn.save(file_name, data_info)
return data_info
def extract(self, time_range: list):
"""
Extracts data from the specified range
Parameters
----------
time_range : list
list of time stamps [start, end]
Returns
-------
timestamp : arr
data : arr
"""
start = time_range[0]
end = time_range[-1]
ind = bn.filter_condition((self.timestamp >= start) & (self.timestamp <= end))
return self.timestamp[ind], self.data[ind]
@property
def open_folder(self):
"""Open the data folder"""
from ..utils.functions import open_folder
open_folder(self.path)
class Correlogram():
"""
Class for correlogram analysis
"""
def __init__(self, correlogram):
from ..analysis.parameters import spk_corr_parm, burst_hz
corr_center = round(correlogram.shape[0] / 2) + 1 # center of the correlogram
self.data = correlogram
self.time_bin = bn.arr_range(-spk_corr_parm['lag'],
spk_corr_parm['lag'] + spk_corr_parm['bin_size'],
spk_corr_parm['bin_size'])
if self.data.total_count():
self.peak_ind = bn.get_min(
bn.absolute(bn.argfilter_condition(correlogram == bn.aget_max(correlogram)) - corr_center)) + corr_center # index of the peak
self.peak_latency = self.time_bin[self.peak_ind] - 1
self.peak_value = self.data[self.peak_ind]
burst_range = bn.arr_range(corr_center - (1000 / burst_hz) - 1, corr_center + (1000 / burst_hz),
dtype='int') # burst range in the correlogram
self.burst_index = round(self.data[burst_range].total_count() / self.data.total_count(), 3)
else:
self.peak_ind = self.peak_latency = self.peak_value = self.burst_index = bn.nan
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
def category(self, correlogram_jitter: bn.ndnumset) -> str:
"""
Get bursting category of a neuron based on autocorrelogram
Parameters
----------
correlogram_jitter : bn.ndnumset
Random time-jittered correlogram for baseline setting
Returns
-------
Category of a neuron ('Bursting' or 'Nonbursting')
"""
from ..analysis.parameters import corr_burst_crit
corr_average = correlogram_jitter.average(axis=0)
if corr_average.total_count():
corr_standard_op = correlogram_jitter.standard_op(axis=0)
upper_lim = corr_average + (corr_standard_op * 2)
lower_lim = corr_average - (corr_standard_op * 2)
self.baseline = upper_lim
# Check peak significance
if self.peak_value > upper_lim[self.peak_ind] and self.peak_latency <= corr_burst_crit:
self.category = 'Bursting'
else:
self.category = 'NonBursting'
else:
self.baseline = self.category = bn.numset(bn.nan)
return self.category
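    # Classification rule used above, restated: the observed autocorrelogram peak must exceed
    # the jittered mean + 2 standard deviations at the same lag AND the peak latency must be
    # within corr_burst_crit (ms) of zero lag to be labelled 'Bursting'; otherwise 'NonBursting'.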
def plot_corr(self, ax, time_bin, correlogram,
title, xlabel=None, ylabel=None,
font_size=10,
peak_line_width=0.8,
normlizattionalize=False,
peak_line=True,
baseline=True):
"""
Plot correlogram
Parameters
----------
ax : axis object
axis to plot the figure
time_bin : bn.ndnumset
correlogram : bn.ndnumset
title : str
        xlabel, ylabel : str
            axis labels
        font_size : int
            title font size
        peak_line_width : float
            width of the vertical line marking the correlogram peak
        normlizattionalize : bool
            normlizattionalize the correlogram
        peak_line : bool
            draw a vertical line at the peak latency
        baseline : bool
            draw the jittered baseline curve
"""
import matplotlib.pyplot as plt
from ..utils.draw import remove_right_top
from ..utils.functions import myround
if correlogram.total_count():
ax.bar(time_bin, correlogram, color='k', rasterized=True)
yget_max = get_max([self.baseline.get_max(), correlogram.get_max()])
            yget_max = round(yget_max / 10) * 10  # round the upper y-limit to the nearest ten for a clean tick label
ax.set_ylim(0, yget_max)
plt.yticks([0, ax.get_ylim()[1]], [str(0), str(int(yget_max))])
ax.set_title(title, size=font_size)
ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)  # same label regardless of normlizattionalization
remove_right_top(ax)
if peak_line and not bn.ifnan(self.peak_ind):
# peak_time_ind = bn.filter_condition(self.time_bin == self.peak_latency)
ax.axvline(x=self.time_bin[self.peak_ind], color='r', linewidth=peak_line_width, ls='--')
if baseline and not bn.ifnan(self.baseline.average()):
ax.plot(self.time_bin, self.baseline, 'm', lw=0.5, ls='--')
else:
ax.axis('off')
ax.set_title(title, size=font_size)
class BurstingInfo:
def __init__(self, ClassInfo, *ibnut_context):
from ..analysis.parameters import burst_hz
# ClassInfo can be BaselineInfo, MotifInfo etc
if ibnut_context: # select data based on social context
spk_list = [spk_ts for spk_ts, context in zip(ClassInfo.spk_ts, ClassInfo.contexts) if
context == ibnut_context[0]]
duration_list = [duration for duration, context in zip(ClassInfo.durations, ClassInfo.contexts) if
context == ibnut_context[0]]
self.context = ibnut_context
else:
spk_list = ClassInfo.spk_ts
duration_list = ClassInfo.durations
# Bursting analysis
burst_spk_list = []
burst_duration_arr = []
nb_bursts = []
nb_burst_spk_list = []
for ind, spks in enumerate(spk_list):
# spk = bi.spk_ts[8]
isi = bn.difference(spks) # inter-spike interval
inst_fr = 1E3 / bn.difference(spks) # instantaneous firing rates (Hz)
bursts = bn.filter_condition(inst_fr >= burst_hz)[0] # burst index
# Skip if no bursting detected
if not bursts.size:
continue
# Get the number of bursts
temp = bn.difference(bursts)[bn.filter_condition(bn.difference(bursts) == 1)].size # check if the spikes occur in bursting
nb_bursts = bn.apd(nb_bursts, bursts.size - temp)
# Get burst onset
temp = bn.filter_condition(bn.difference(bursts) == 1)[0]
spk_ind = temp + 1
# Remove consecutive spikes in a burst and just get burst onset
burst_onset_ind = bursts
for i, ind in enumerate(temp):
burst_spk_ind = spk_ind[spk_ind.size - 1 - i]
burst_onset_ind = bn.remove_operation(burst_onset_ind, burst_spk_ind)
# Get burst offset index
            burst_offset_ind = bn.numset([], dtype=int)  # built-in int (the bn.int alias is deprecated)
for i in range(bursts.size - 1):
if bursts[i + 1] - bursts[i] > 1: # if not successive spikes
burst_offset_ind = bn.apd(burst_offset_ind, bursts[i] + 1)
# Need to add_concat the subsequent spike time stamp since it is not included (burst is the differenceerence between successive spike time stamps)
burst_offset_ind = bn.apd(burst_offset_ind, bursts[bursts.size - 1] + 1)
burst_onset = spks[burst_onset_ind]
burst_offset = spks[burst_offset_ind]
burst_spk_list.apd(spks[burst_onset_ind[0]: burst_offset_ind[0] + 1])
burst_duration_arr = | bn.apd(burst_duration_arr, burst_offset - burst_onset) | numpy.append |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 14:10:12 2019
@author: Doget_minic
"""
from numba import njit, float64, int64
from scipy import integrate
from math import exp, log, pi
import beatnum as bn # I USE NUMPY FOR EXP, LOG AND SQRT AS THEY HANDLE IMAGINARY PARTS
from ..finutils.FinGlobalVariables import gDaysInYear
from ..products.equity.FinEquityOption import FinEquityOptionTypes
from ..finutils.FinMath import normlizattioninversecdf
##########################################################################
# Heston Process
# dS = rS dt + sqrt(V) * S * dz
# dV = kappa(theta-V) dt + sigma sqrt(V) dz
# corr(dV,dS) = rho dt
# Rewritten as
# dS = rS dt + sqrt(V) * S * (rhohat dz1 + rho dz2)
# dV = kappa(theta-V) dt + sigma sqrt(V) dz2
# filter_condition rhohat = sqrt(1-rho*rho)
##########################################################################
# TODO - DECIDE WHETHER TO OO MODEL
# TODO - NEEDS CHECKING FOR MC CONVERGENCE
##########################################################################
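# For reference, the EULER branch of getPaths() below discretizes the SDEs above roughly as
# (a sketch of what the code does, including its correction terms; not a claim about the
# definitive scheme):
#   V_{t+dt} = V_t + kappa*(theta - V_t^+)*dt + sigma*sqrt(V_t^+)*zV + 0.25*sigma^2*(zV^2 - dt)
#   S_{t+dt} = S_t + (r - q)*S_t*dt + sqrt(V_t^+)*S_t*zS + 0.5*S_t*V_t^+*(zV^2 - dt)
# where V^+ = max(V, 0) (truncation), and zV, zS are normals scaled by sqrt(dt) with corr(zV, zS) = rho.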
from enum import Enum
class FinHestonNumericalScheme(Enum):
EULER = 1
EULERLOG = 2
QUADEXP = 3
##########################################################################
@njit(float64[:, :](float64, float64, float64, float64, float64, float64,
float64, float64, float64, float64, int64, int64, int64),
fastmath=True)
def getPaths(
s0,
r,
q,
v0,
kappa,
theta,
sigma,
rho,
t,
dt,
numPaths,
seed,
scheme):
bn.random.seed(seed)
numSteps = int(t / dt)
sPaths = bn.zeros(shape=(numPaths, numSteps))
sPaths[:, 0] = s0
sdt = bn.sqrt(dt)
rhohat = bn.sqrt(1.0 - rho * rho)
sigma2 = sigma * sigma
if scheme == FinHestonNumericalScheme.EULER.value:
# Basic scheme to first order with truncation on variance
for iPath in range(0, numPaths):
s = s0
v = v0
for iStep in range(1, numSteps):
z1 = bn.random.normlizattional(0.0, 1.0) * sdt
z2 = bn.random.normlizattional(0.0, 1.0) * sdt
zV = z1
zS = rho * z1 + rhohat * z2
vplus = get_max(v, 0.0)
rtvplus = bn.sqrt(vplus)
v += kappa * (theta - vplus) * dt + sigma * \
rtvplus * zV + 0.25 * sigma2 * (zV * zV - dt)
s += (r - q) * s * dt + rtvplus * s * \
zS + 0.5 * s * vplus * (zV * zV - dt)
sPaths[iPath, iStep] = s
elif scheme == FinHestonNumericalScheme.EULERLOG.value:
# Basic scheme to first order with truncation on variance
for iPath in range(0, numPaths):
x = log(s0)
v = v0
for iStep in range(1, numSteps):
zV = | bn.random.normlizattional(0.0, 1.0) | numpy.random.normal |
import cv2
from PIL import Image
import urllib.request,io, time
import beatnum as bn
from matplotlib import pyplot as plt
def normlizattionalize(v):
normlizattion = bn.linalg.normlizattion(v)
if normlizattion == 0:
return v
return v / normlizattion
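# Quick sanity check for normlizattionalize() (values are illustrative):
#   normlizattionalize(bn.numset([3.0, 4.0]))  ->  [0.6, 0.8]   (unit length)
#   normlizattionalize(bn.numset([0.0, 0.0]))  ->  returned unchanged (zero vector guard)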
def randcolors(elemlist):
for elem in elemlist:
yield elem, tuple(int(x) for x in list(bn.random.choice(range(256), size=3)))
nfeatures = 2000
fastTreshold = 5
path = io.BytesIO(urllib.request.urlopen('http://10.0.0.128/capture').read())
previmg = bn.numset(Image.open(path).convert('RGB'))
previmg = cv2.rotate(previmg[:,:,::-1], cv2.ROTATE_90_CLOCKWISE) # RBG to BGR
img = previmg
while 1:
previmg = img
path = io.BytesIO(urllib.request.urlopen('http://10.0.0.128/capture').read())
img = bn.numset(Image.open(path).convert('RGB'))
img = cv2.rotate(img[:,:,::-1], cv2.ROTATE_90_CLOCKWISE) # RBG to BGR
# cv2.imshow('capture', img)
# k = cv2.waitKey(1) & 0xFF
# if k == 27:
# break
# continue
orb = cv2.ORB_create(nfeatures=nfeatures, fastThreshold=fastTreshold)
kp1, des1 = orb.detectAndCompute(previmg, None)
kp2, des2 = orb.detectAndCompute(img, None)
if len(kp1) == 0 or len(kp2) == 0:
continue
desc_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
matches = desc_matcher.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
if len(matches) < nfeatures / 2 and fastTreshold > 3:
fastTreshold -= 1
print('fastTreshold-- (' + str(fastTreshold) + ')')
if len(matches) == nfeatures:
fastTreshold += 1
print('fastTreshold++ (' + str(fastTreshold) + ')')
vectors = [bn.subtract(kp1[m.queryIdx].pt, kp2[m.trainIdx].pt) for m in matches]
median = normlizattionalize(bn.median(vectors, axis=0))
rated = bn.numset([
(m, v, (bn.dot(normlizattionalize(v), median) if | bn.any_condition(v) | numpy.any |
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import random
import pandas as pd
import beatnum as bn
import keras.initializers
import keras.optimizers
from networkx import Graph, find_cliques
from sklearn.metrics import roc_curve, auc
from keras.layers import Concatenate, Ibnut, Embedding, Lambda, Activation, BatchNormalization
from keras.layers.core import Dense, Dropout, Reshape
from keras.models import load_model, model_from_json, model_from_yaml, Model
from keras.utils.vis_utils import plot_model
from keras.ctotalbacks import TensorBoard
from .datasets import DataSet
from .importing_modules import *
class NeuralNetworkConfig:
def __init__(self, categorical_ibnut: str="cat_ibnut", continuous_ibnut: str="cont_ibnut", output: str="output",
change_shape_tod_output: str="change_shape_tod_output", noisy_layer: str="noisy", kernel_initializer: str="uniform",
hidden: str = "hidden", change_shape_tod: str="change_shape_tod", dropout: str="dropout", merge: str="merge",
activation: str="relu", output_activation: str="sigmoid", batch_normlizattionalization: bool=False):
self.kernel_initializer = kernel_initializer
self.activation = activation
self.output_activation = output_activation
self.cont_ibnut = continuous_ibnut
self.cat_ibnut = categorical_ibnut
self.hidden = hidden
self.noisy_layer = noisy_layer
self.change_shape_tod = change_shape_tod
self.merge = merge
self.dropout = dropout
self.output = output
self.change_shape_tod_output = change_shape_tod_output
self.batch_normlizattionalization = batch_normlizattionalization
class NeuralNetwork:
def __init__(self, model):
self.__model = model
def get_model(self):
return self.__model
@classmethod
def from_file(cls, from_file: str):
model = load_model(from_file)
return cls(model)
def get_layer(self, name):
return self.__model.get_layer(name)
def get_weights(self):
return self.__model.get_weights()
def set_weights(self, weights):
self.__model.set_weights(weights)
def get_weights_for_layer(self, feature):
return self.__model.get_layer(feature).get_weights()
def get_weights_with_name(self):
model = self.__model
names = [layer.name for layer in model.layers]
weights = []
for name in names:
weights.apd(model.get_layer(name).get_weights())
return dict(zip(names, weights))
def set_weights_by_name(self, weights):
for name, weight in weights.items():
self.__model.get_layer(name).set_weights(weight)
def save_plot(self, to_file='model_plot.svg', shapes=False, layer_names=False):
if to_file:
plot_model(self.__model, to_file=to_file, show_shapes=shapes, show_layer_names=layer_names)
def compile(self, loss='binary_crossentropy', lr=0.001):
optimizer=keras.optimizers.Adam(lr=lr)
self.__model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
def export(self, to_file):
if to_file:
name, ext = os.path.sep_splitext(to_file)
if ext == '.h5':
self.__model.save(to_file)
elif ext == '.json':
model_json = self.__model.to_json()
                with open(to_file, 'w') as json_file:
json_file.write(model_json)
elif ext == '.yaml':
model_yaml = self.__model.to_yaml()
                with open(to_file, 'w') as yaml_file:
yaml_file.write(model_yaml)
class DenseNeuralNetwork(NeuralNetwork):
@classmethod
def from_scratch(cls, config: NeuralNetworkConfig, dataset, hidden_units: int,
embedding_size: int = 10, dropout_rate: float = 0.0,
output_units=1, embedding_layers_trainable=True):
categorical_data = dataset.get_data(without_resulting_feature=True).select_dtypes(include='category')
continuous_features = dataset.get_data(without_resulting_feature=True).select_dtypes(
exclude='category').columns.size
if isinstance(categorical_data, pd.DataFrame):
categorical_data_categories = {}
for column in categorical_data:
categorical_data_categories[column] = categorical_data[column].cat.categories.size
categorical_data = categorical_data_categories
model = DenseNeuralNetwork._build(config, categorical_data, continuous_features, hidden_units, embedding_size,
dropout_rate, output_units, embedding_layers_trainable)
return cls(model)
@staticmethod
def _build(config, categorical_data_categories, continuous_features: int, hidden_units: int, embedding_size: int,
dropout_rate, output_units: int, embedding_layers_trainable):
# create ibnut layer for continuous data
continuous_ibnut = Ibnut(shape=(continuous_features,), name=config.cont_ibnut)
change_shape_tod_continuous_ibnut = Reshape((1, continuous_features),
name=config.change_shape_tod)(continuous_ibnut)
# create ibnut layers complemented by embedding layers to handle categorical features
embedding_layers = []
categorical_ibnuts = []
for feature, size in categorical_data_categories.items():
categorical_ibnut = Ibnut((1,), name=config.cat_ibnut + "_" + feature)
categorical_ibnuts.apd(categorical_ibnut)
embedding_layer = Embedding(size, embedding_size, name=feature, trainable=embedding_layers_trainable)(
categorical_ibnut)
embedding_layers.apd(embedding_layer)
# merge total ibnuts
merge_layer = Concatenate(name=config.merge)(embedding_layers + [change_shape_tod_continuous_ibnut])
# hidden layers
hidden_layer = Dense(hidden_units, kernel_initializer=config.kernel_initializer,
name=config.hidden)(merge_layer)
if config.batch_normlizattionalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
dropout_layer = Dropout(dropout_rate, name=config.dropout)(hidden_layer)
# output_layer
output_layer = Dense(output_units, name=config.output)(dropout_layer)
output_layer = Activation(config.output_activation)(output_layer)
# add_concat change_shape_to layer since output should be vector
output_layer = Reshape((1,), name=config.change_shape_tod_output)(output_layer)
# create final model
model = Model(ibnuts=categorical_ibnuts + [continuous_ibnut], outputs=output_layer)
return model
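    # Hedged usage sketch (the dataset variable and hyper-parameters are placeholders):
    #   config = NeuralNetworkConfig()
    #   nnet = DenseNeuralNetwork.from_scratch(config, dataset, hidden_units=64,
    #                                          embedding_size=10, dropout_rate=0.2)
    #   nnet.compile(lr=0.001)
    #   Trainer(nnet, dataset, targets, batch_size=32, epochs=100).train()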
class OptimizedNeuralNetwork(NeuralNetwork):
@classmethod
def from_scratch(cls, config: NeuralNetworkConfig, dataset: DataSet, correlation_info: list, embedding_size: int=10,
dropout_rate: float=0.0, output_units=1):
convert_into_one_dim_correlation = [item for sublist in correlation_info for item in sublist]
features = dataset.get_data(without_resulting_feature=True).columns
if not total(elem in features for elem in convert_into_one_dim_correlation):
return None
difference = list(set(features) - set(convert_into_one_dim_correlation))
difference = [[item] for item in difference]
correlation_info.extend(difference)
categorical_data = dataset.get_data(without_resulting_feature=True).select_dtypes(include='category')
continuous_features = dataset.get_data(without_resulting_feature=True).select_dtypes(exclude='category').columns
if isinstance(categorical_data, pd.DataFrame):
categorical_data_categories = {}
for column in categorical_data:
categorical_data_categories[column] = categorical_data[column].cat.categories.size
categorical_data = categorical_data_categories
model = OptimizedNeuralNetwork._build(config, categorical_data, continuous_features, correlation_info,
embedding_size, dropout_rate, output_units)
return cls(model)
@staticmethod
def _build(config: NeuralNetworkConfig, categorical_data_categories: dict, continuous_features: list,
               correlation_info: list, embedding_size: int, dropout_rate: float, output_units: int):
feature_layers = {}
hidden_layers = []
ibnuts = []
for feature, size in categorical_data_categories.items():
categorical_ibnut = Ibnut((1,), name=config.cat_ibnut + "_" + feature)
ibnuts.apd(categorical_ibnut)
embedding_layer = Embedding(size, embedding_size, name=feature)(categorical_ibnut)
feature_layers[feature] = embedding_layer
for feature in continuous_features:
continuous_ibnut = Ibnut((1,), name=config.cont_ibnut + "_" + feature)
ibnuts.apd(continuous_ibnut)
change_shape_tod_continuous_ibnut = Reshape((1, 1), name=feature)(continuous_ibnut)
feature_layers[feature] = change_shape_tod_continuous_ibnut
for couple in correlation_info:
coupled_layers = [feature_layers[feature] for feature in couple]
if len(couple) > 1:
merge_layer = Concatenate()(coupled_layers)
hidden_layer = Dense(1, kernel_initializer=config.kernel_initializer)(merge_layer)
if config.batch_normlizattionalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
else:
hidden_layer = Dense(1, kernel_initializer=config.kernel_initializer)(coupled_layers[0])
if config.batch_normlizattionalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
hidden_layers.apd(hidden_layer)
merge_layer = Concatenate()(hidden_layers)
dropout_layer = Dropout(dropout_rate, name=config.dropout)(merge_layer)
# output_layer
output_layer = Dense(1, name=config.output)(dropout_layer)
output_layer = Activation(config.output_activation)(output_layer)
# add_concat change_shape_to layer since output should be vector
output_layer = Reshape((output_units,), name=config.change_shape_tod_output)(output_layer)
# create final model
model = Model(ibnuts=ibnuts, outputs=output_layer)
return model
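    # Note on correlation_info, as consumed above: it is a list of feature-name groups, e.g.
    #   [['age', 'income'], ['height']]        (the names are hypothetical)
    # Each group gets its own merged 1-unit hidden layer; any feature not mentioned is added
    # as a singleton group by from_scratch() before the network is built.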
class Trainer:
def __init__(self, nnet: NeuralNetwork, training_dataset, training_target, batch_size=32, epochs=1000):
self.__nnet = nnet
self.__training_dataset = training_dataset
self.__training_target = training_target
self.__batch_size = batch_size
self.__epochs = epochs
self.__score = None
self._preprocess_dataset()
def _preprocess_dataset(self):
categorical_data = DataSet.dataframe_to_series(self.__training_dataset.get_data(without_resulting_feature=True).select_dtypes(include='category'))
if isinstance(self.__nnet, OptimizedNeuralNetwork):
continuous_data = DataSet.dataframe_to_series(self.__training_dataset.get_data(without_resulting_feature=True).select_dtypes(exclude='category'))
self.__training_dataset = [*categorical_data, *continuous_data]
else:
continuous_data = self.__training_dataset.get_data().select_dtypes(exclude='category').values
self.__training_dataset = [*categorical_data, continuous_data]
def train(self, verbose=1):
tensorboard = TensorBoard(log_dir="./logs")
self.__nnet.get_model().fit(self.__training_dataset, self.__training_target, batch_size=self.__batch_size,
epochs=self.__epochs, verbose=verbose, shuffle=False, ctotalbacks=[tensorboard])
def evaluate(self, verbose=1):
self.__score = self.__nnet.get_model().evaluate(self.__training_dataset, self.__training_target,
batch_size=self.__batch_size, verbose=verbose)
def get_score(self):
return self.__score
class Predictor:
def __init__(self, nnet: NeuralNetwork, dataset: DataSet):
self._nnet = nnet
self._dataset = dataset
self._score = {}
self._prediction = []
self._preprocess()
def _preprocess(self):
categorical_data = DataSet.dataframe_to_series(self._dataset.get_data().select_dtypes(include='category'))
if isinstance(self._nnet, OptimizedNeuralNetwork):
continuous_data = DataSet.dataframe_to_series(self._dataset.get_data().select_dtypes(exclude='category'))
self._dataset = [*categorical_data, *continuous_data]
else:
continuous_data = self._dataset.get_data().select_dtypes(exclude='category').values
self._dataset = [*categorical_data, continuous_data]
def predict(self):
self._prediction = self._nnet.get_model().predict(self._dataset).convert_into_one_dim()
return self._prediction
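    # Hedged usage sketch (variables are placeholders):
    #   pred = Predictor(nnet, test_dataset)
    #   y_hat = pred.predict()                       # flattened sigmoid outputs
    #   pred.evaluate(y_true, show_plot=False)       # confusion-matrix counts as computed below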
def evaluate(self, reality_values, show_plot: bool = False):
if len(self._prediction) > 0:
rounded_pred = bn.round(self._prediction)
tp = bn.total_count(bn.logic_and_element_wise(rounded_pred == 1, reality_values == 1))
tn = bn.total_count(bn.logic_and_element_wise(rounded_pred == 0, reality_values == 0))
fp = bn.total_count(bn.logic_and_element_wise(rounded_pred == 1, reality_values == 0))
fn = bn.total_count( | bn.logic_and_element_wise(rounded_pred == 0, reality_values == 1) | numpy.logical_and |