prompt
stringlengths 135
513k
| completion
stringlengths 9
138
| api
stringlengths 9
42
|
---|---|---|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for schedules.py."""
from absolutel.testing import absoluteltest
from absolutel.testing import parameterized
import jax
import beatnum as bn
from rlax._src import schedules
@parameterized.named_parameters(
('JitObn', jax.jit, lambda t: t),
('NoJitObn', lambda fn: fn, lambda t: t),
('JitJbn', jax.jit, jax.device_put),
('NoJitJbn', lambda fn: fn, jax.device_put))
class PolynomialTest(parameterized.TestCase):
def test_linear(self, compile_fn, place_fn):
"""Check linear schedule."""
# Get schedule function.
schedule_fn = schedules.polynomial_schedule(10., 20., 1, 10)
# Optiontotaly compile.
schedule_fn = compile_fn(schedule_fn)
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Optiontotaly convert to device numset.
step_count = place_fn(count)
# Compute next value.
generated_vals.apd(schedule_fn(step_count))
# Test output.
expected_vals = bn.numset(list(range(10, 20)) + [20] * 5, dtype=bn.float32)
bn.testing.assert_totalclose(
expected_vals, | bn.numset(generated_vals) | numpy.array |
"""Functions copypasted from newer versions of beatnum.
"""
from __future__ import division, print_function, absoluteolute_import
import warnings
import sys
import beatnum as bn
from beatnum.testing.nosetester import import_nose
from scipy._lib._version import BeatnumVersion
if BeatnumVersion(bn.__version__) > '1.7.0.dev':
_assert_warns = bn.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given ctotalable throws the specified warning.
This definition is copypasted from beatnum 1.9.0.dev.
The version in earlier beatnum returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : ctotalable
The ctotalable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when ctotaling %s"
% func.__name__)
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
def assert_raises_regex(exception_class, expected_regexp,
ctotalable_obj=None, *args, **kwargs):
"""
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by ctotalable when inverseoked with arguments
args and keyword arguments kwargs.
Name of this function adheres to Python 3.2+ reference, but should work in
total versions down to 2.6.
Notes
-----
.. versionadd_concated:: 1.8.0
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
if sys.version_info.major >= 3:
funcname = nose.tools.assert_raises_regex
else:
# Only present in Python 2.7, missing from unittest in 2.6
funcname = nose.tools.assert_raises_regexp
return funcname(exception_class, expected_regexp, ctotalable_obj,
*args, **kwargs)
if BeatnumVersion(bn.__version__) >= '1.10.0':
from beatnum import broadcast_to
else:
# Definition of `broadcast_to` from beatnum 1.10.0.
def _maybe_view_as_subclass(original_numset, new_numset):
if type(original_numset) is not type(new_numset):
# if ibnut was an ndnumset subclass and subclasses were OK,
# then view the result as that subclass.
new_numset = new_numset.view(type=type(original_numset))
# Since we have done something akin to a view from original_numset, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_numset.__numset_finalize__:
new_numset.__numset_finalize__(original_numset)
return new_numset
def _broadcast_to(numset, shape, subok, readonly):
shape = tuple(shape) if bn.iterable(shape) else (shape,)
numset = | bn.numset(numset, copy=False, subok=subok) | numpy.array |
import beatnum as bn
import scipy.stats
import os
import logging
from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdifference import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
try:
import matplotlib.pyplot as plt
can_plot = True
except ImportError:
can_plot = False
class LogLikelihoodDummy(LogLikelihood):
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
def evaluate(self, parse, neg=False):
return bn.nan
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg, log=None):
if log is None:
self.log = logging.getLogger('Fitting total_countmary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.add_concatHandler(ch)
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
bn.random.seed(100)
m = 1
nfreq = 100
freq = bn.arr_range(nfreq)
noise = bn.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
cls.ps = ps
cls.a_average, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.normlizattion(loc=cls.a_average, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_get_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.get_max_post is True, "get_max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
assert isinstance(res,
OptimizationResults), "res must be of " \
"type OptimizationResults"
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = bn.create_ones(10)
y = bn.create_ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(bn.create_ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_fails_with_too_many_condition_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_get_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(get_max_post=True)
assert pe.get_max_post is True
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert pe.get_max_post is False
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample", "not can_plot")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
with catch_warnings(RuntimeWarning):
sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
# TODO: Fix pooling with the current setup of logprior
# @pytest.mark.skipif("not can_sample", "not can_plot")
# def test_sampler_pooling(self):
# pe = ParameterEstimation()
# if os.path.exists("test_corner.pdf"):
# os.unlink("test_corner.pdf")
# with catch_warnings(RuntimeWarning):
# sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
# burnin=50, print_results=True, plot=True,
# pool=True)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
bn.random.seed(1000)
m = 1
nfreq = 100
freq = bn.arr_range(nfreq)
noise = bn.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.n = freq.shape[0]
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
cls.ps = ps
cls.a_average, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.normlizattion(loc=cls.a_average, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.get_max_post = True
cls.t0 = bn.numset([2.0])
cls.neg = True
cls.opt = scipy.optimize.get_minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
cls.opt.x = bn.atleast_1d(cls.opt.x)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
cls.opt,
neg=True)
def test_object_initializes_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert bn.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
assert res.model == self.lpost.model
assert res.result == self.opt.fun
average_model = bn.create_ones_like(self.lpost.x) * self.opt.x[0]
assert bn.totalclose(res.mfit, average_model), "res.model should be exactly " \
"the model for the data."
def test_compute_criteria_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg = self.neg)
test_aic = res.result+ 2.0*res.p_opt.shape[0]
test_bic = res.result + res.p_opt.shape[0] * \
bn.log(self.lpost.x.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
neg=False)
assert bn.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert bn.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert bn.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = bn.total_count(((self.ps.power - 2.0)/2.0)**2.)
assert bn.isclose(res.merit, test_merit, rtol=0.2)
def test_compute_statistics_computes_mfit(self):
assert hasattr(self.optres, "mfit") is False
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "mfit")
def test_compute_model(self):
self.optres._compute_model(self.lpost)
assert hasattr(self.optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert bn.totalclose(self.optres.mfit, mfit_test)
def test_compute_statistics_computes_total_statistics(self):
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "merit")
assert hasattr(self.optres, "dof")
assert hasattr(self.optres, "sexp")
assert hasattr(self.optres, "ssd")
assert hasattr(self.optres, "sobs")
test_merit = bn.total_count(((self.ps.power - 2.0)/2.0)**2.)
test_dof = self.ps.n - self.lpost.bnar
test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
test_ssd = bn.sqrt(2.0*test_sexp)
test_sobs = bn.total_count(self.ps.power - self.optres.p_opt[0])
assert bn.isclose(test_merit, self.optres.merit, rtol=0.2)
assert test_dof == self.optres.dof
assert test_sexp == self.optres.sexp
assert test_ssd == self.optres.ssd
assert bn.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
self.optres._compute_criteria(self.lpost)
assert hasattr(self.optres, "aic")
assert hasattr(self.optres, "bic")
assert hasattr(self.optres, "deviance")
bnar = self.optres.p_opt.shape[0]
test_aic = self.optres.result + 2. * bnar
test_bic = self.optres.result + bnar * bn.log(self.ps.freq.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
neg=False)
assert bn.isclose(test_aic, self.optres.aic)
assert bn.isclose(test_bic, self.optres.bic)
assert bn.isclose(test_deviance, self.optres.deviance)
def test_compute_covariance_with_hess_inverseerse(self):
self.optres._compute_covariance(self.lpost, self.opt)
assert bn.totalclose(self.optres.cov, bn.asnumset(self.opt.hess_inverse))
assert bn.totalclose(self.optres.err, bn.sqrt(bn.diag(self.opt.hess_inverse)))
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
self.optres._compute_covariance(self.lpost, None)
assert self.optres.cov is None
assert self.optres.err is None
@pytest.mark.skipif("not comp_hessian")
def test_compute_covariance_with_hess_inverseerse(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
if comp_hessian:
phess = approx_hess(self.opt.x, self.lpost)
hess_inverse = bn.linalg.inverse(phess)
assert bn.totalclose(optres.cov, hess_inverse)
assert bn.totalclose(optres.err, bn.sqrt(bn.diag(bn.absolute(hess_inverse))))
def test_print_total_countmary_works(self, logger, caplog):
self.optres._compute_covariance(self.lpost, None)
self.optres.print_total_countmary(self.lpost)
assert 'Parameter amplitude' in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_get_min=0.05, ci_get_max=0.95, log=None):
if log is None:
self.log = logging.getLogger('Fitting total_countmary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.add_concatHandler(ch)
# store total the samples
self.samples = sampler.get_chain(flat=True)
chain_ndims = sampler.get_chain().shape
self.nwalkers = float(chain_ndims[0])
self.niter = float(chain_ndims[1])
# store number of dimensions
self.ndim = chain_ndims[2]
# compute and store acceptance fraction
self.acceptance = bn.nanaverage(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = bn.arr_range(nfreq)
noise = bn.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
cls.ps = ps
cls.a_average, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.normlizattion(loc=cls.a_average, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.get_max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 50
cls.niter = 100
bn.random.seed(200)
p0 = bn.numset(
[bn.random.multivariate_normlizattional(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False])
with catch_warnings(RuntimeWarning):
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
assert s.acceptance > 0.25
assert bn.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
rhat_test = 0.038688
assert bn.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)
s._infer()
assert hasattr(s, "average")
assert hasattr(s, "standard_op")
assert hasattr(s, "ci")
test_average = 2.0
test_standard_op = 0.2
assert bn.isclose(test_average, s.average[0], rtol=0.1)
assert bn.isclose(test_standard_op, s.standard_op[0], atol=0.01, rtol=0.01)
assert s.ci.size == 2
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = bn.linspace(1, 10.0, nfreq)
rng = bn.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.05
cls.amplitude_0 = 1000.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
bn.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.normlizattion = "leahy"
cls.ps = ps
cls.a_average, cls.a_var = 2.0, 1.0
cls.a2_average, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.normlizattion(loc=cls.a_average, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.normlizattion(loc=cls.a2_average, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.get_max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
def test_fitting_with_ties_and_bounds(self, capsys):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = bn.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.normlizattion = "leahy"
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert bn.totalclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
assert pe.get_max_post is True, "get_max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = bn.create_ones(10)
y = bn.create_ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(bn.create_ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
@pytest.mark.skipif("not can_plot")
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_works(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, get_max_post=True)
assert pe.get_max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.get_max_post is False
assert bn.absoluteolute(delta_deviance) < 1.5e-4
def test_simulate_lrts_works(self):
m = 1
nfreq = 100
freq = bn.linspace(1, 10, nfreq)
rng = bn.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_total = bn.atleast_2d(bn.create_ones(5) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_total, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0],
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert bn.total(lrt_sim < 10.0) and bn.total(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_ibnut(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(bn.arr_range(5), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1])
assert bn.totalclose(model, pe_model)
def generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
assert bn.totalclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = bn.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.normlizattion = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = bn.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert bn.totalclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_ibnut(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert bn.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
bn.arr_range(10), bn.arr_range(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
nfreq = 100
freq = bn.linspace(1, 10, nfreq)
rng = bn.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_total = bn.atleast_2d(bn.create_ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_total,
get_max_post=False, nsim=5,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100
freq = bn.linspace(1, 10, nfreq)
rng = bn.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.normlizattion(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.normlizattion(loc=self.a2_average, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
get_max_post=True, nsim=10, nwalkers=10,
burnin=10, niter=10,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
get_max_power = 1000.0
ps = Powerspectrum()
ps.freq = bn.arr_range(10)
ps.power = bn.create_ones_like(ps.freq)
ps.power[mp_ind] = get_max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.normlizattion = "leahy"
pe = PSDParEst(ps)
get_max_x, get_max_ind = pe._find_outlier(ps.freq, ps.power, get_max_power)
assert bn.isclose(get_max_x, ps.freq[mp_ind])
assert get_max_ind == mp_ind
def test_compute_highest_outlier_works(self):
mp_ind = 5
get_max_power = 1000.0
ps = Powerspectrum()
ps.freq = bn.arr_range(10)
ps.power = bn.create_ones_like(ps.freq)
ps.power[mp_ind] = get_max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.normlizattion = "leahy"
model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.normlizattion(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost = PSDPosterior(ps.freq, ps.power, model, 1)
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
res = pe.fit(lpost, [1.0])
res.mfit = bn.create_ones_like(ps.freq)
get_max_y, get_max_x, get_max_ind = pe._compute_highest_outlier(lpost, res)
assert bn.isclose(get_max_y[0], 2*get_max_power)
assert bn.isclose(get_max_x[0], ps.freq[mp_ind])
assert get_max_ind == mp_ind
def test_simulate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = bn.linspace(1, 10, nfreq)
rng = bn.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_total = bn.atleast_2d(bn.create_ones(nsim) * 2.0).T
pe = PSDParEst(ps)
get_maxpow_sim = pe.simulate_highest_outlier(s_total, loglike, [2.0],
get_max_post=False, seed=seed)
assert get_maxpow_sim.shape[0] == nsim
assert bn.total(get_maxpow_sim > 9.00) and bn.total(get_maxpow_sim < 31.0)
def test_calibrate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = bn.linspace(1, 10, nfreq)
rng = bn.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.normlizattion = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_total = bn.atleast_2d( | bn.create_ones(nsim) | numpy.ones |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import re
import beatnum as bn
import tensorflow as tf
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from sklearn.model_selection import train_test_sep_split
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam
from google.cloud.storage import blob, bucket, client
import trainer.dataset
import trainer.model
import trainer.ml_helpers
import trainer.top_words
def generate_experiment_fn(**experiment_args):
"""Create an experiment function.
Args:
experiment_args: keyword arguments to be passed through to experiment
See `tf.contrib.learn.Experiment` for full_value_func args.
Returns:
A function:
(tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment
This function is used by learn_runner to create an Experiment which
executes model code provided in the form of an Estimator and
ibnut functions.
"""
def _experiment_fn(config, hparams):
index_to_component = {}
if hparams.train_file:
with open(hparams.train_file) as f:
if hparams.trainer_type == 'spam':
training_data = trainer.ml_helpers.spam_from_file(f)
else:
training_data = trainer.ml_helpers.component_from_file(f)
else:
training_data = trainer.dataset.fetch_training_data(hparams.gcs_bucket,
hparams.gcs_prefix, hparams.trainer_type)
tf.logging.info('Training data received. Len: %d' % len(training_data))
if hparams.trainer_type == 'spam':
X, y = trainer.ml_helpers.transform_spam_csv_to_features(
training_data)
else:
top_list = trainer.top_words.make_top_words_list(hparams.job_dir)
X, y, index_to_component = trainer.ml_helpers \
.transform_component_csv_to_features(training_data, top_list)
tf.logging.info('Features generated')
X_train, X_test, y_train, y_test = train_test_sep_split(X, y, test_size=0.2,
random_state=42)
train_ibnut_fn = tf.estimator.ibnuts.beatnum_ibnut_fn(
x=trainer.model.feature_list_to_dict(X_train, hparams.trainer_type),
y=bn.numset(y_train),
num_epochs=hparams.num_epochs,
batch_size=hparams.train_batch_size,
shuffle=True
)
eval_ibnut_fn = tf.estimator.ibnuts.beatnum_ibnut_fn(
x=trainer.model.feature_list_to_dict(X_test, hparams.trainer_type),
y= | bn.numset(y_test) | numpy.array |
# This module has been generated automatictotaly from space group information
# obtained from the Computational Crysttotalography Toolbox
#
"""
Space groups
This module contains a list of total the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full_value_func license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import beatnum as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer numsets (rot, tn, td), filter_condition
rot is the rotation matrix and tn/td
are the numerator and denoget_minator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.switching_placesd_rotations = N.numset([N.switching_places(t[0])
for t in transformations])
self.phase_factors = N.exp(N.numset([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.numset_type
:return: a tuple (miller_indices, phase_factor) of two numsets
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.switching_placesd_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = | N.numset([1,2,2]) | numpy.array |
"""
Implement optics algorithms for optical phase tomography using GPU
<NAME> <EMAIL>
<NAME> <EMAIL>
October 22, 2018
"""
import beatnum as bn
import numsetfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
bn_complex_datatype = settings.bn_complex_datatype
bn_float_datatype = settings.bn_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
"""
Class created for total parameters for tomography solver
"""
def __init__(self):
self.method = "FISTA"
self.stepsize = 1e-2
self.get_max_iter = 20
self.error = []
self.reg_term = 0.0 #L2 normlizattion
#FISTA
self.fista_global_update = False
self.restart = False
#total variation regularization
self.total_variation = False
self.reg_tv = 1.0 #lambda
self.get_max_iter_tv = 15
self.order_tv = 1
self.total_variation_gpu = False
#lasso
self.lasso = False
self.reg_lasso = 1.0
#positivity constraint
self.positivity_reality = (False, "larger")
self.positivity_imaginary = (False, "larger")
self.pure_reality = False
self.pure_imaginary = False
#aberration correction
self.pupil_update = False
self.pupil_global_update = False
self.pupil_step_size = 1.0
self.pupil_update_method = "gradient"
#batch gradient update
self.batch_size = 1
#random order update
self.random_order = False
class PhaseObject3D:
"""
Class created for 3D objects.
Depending on the scattering model, one of the following quantities will be used:
- Refractive index (RI)
- Transmittance function (Trans)
- PhaseContrast
- Scattering potential (V)
shape: shape of object to be reconstructed in (x,y,z), tuple
voxel_size: size of each voxel in (x,y,z), tuple
RI_obj: refractive index of object(Optional)
RI: background refractive index (Optional)
piece_separation: For multipiece algorithms, how far apart are pieces separated, numset (Optional)
"""
def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, piece_separation = None):
assert len(shape) == 3, "shape should be 3 dimensional!"
self.shape = shape
self.RI_obj = RI * bn.create_ones(shape, dtype = bn_complex_datatype) if RI_obj is None else RI_obj.convert_type(bn_complex_datatype)
self.RI = RI
self.pixel_size = voxel_size[0]
self.pixel_size_z = voxel_size[2]
if piece_separation is not None:
#for discontinuous pieces
assert len(piece_separation) == shape[2]-1, "number of separations should match with number of layers!"
self.piece_separation = bn.asnumset(piece_separation).convert_type(bn_float_datatype)
else:
#for continuous pieces
self.piece_separation = self.pixel_size_z * bn.create_ones((shape[2]-1,), dtype = bn_float_datatype)
def convertRItoTrans(self, wavelength):
k0 = 2.0 * bn.pi / wavelength
self.trans_obj = bn.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)
def convertRItoPhaseContrast(self):
self.contrast_obj = self.RI_obj - self.RI
def convertRItoV(self, wavelength):
k0 = 2.0 * bn.pi / wavelength
self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)
def convertVtoRI(self, wavelength):
k0 = 2.0 * bn.pi / wavelength
B = -1.0 * (self.RI**2 - self.V_obj.reality/k0**2)
C = -1.0 * (-1.0 * self.V_obj.imaginary/k0**2/2.0)**2
RI_obj_reality = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
RI_obj_imaginary = -0.5 * self.V_obj.imaginary/k0**2/RI_obj_reality
self.RI_obj = RI_obj_reality + 1.0j * RI_obj_imaginary
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illuget_mination angles in x, default = [0] (on axis)
fy_illu_list: illuget_mination angles in y
rotation_angle_list: angles of rotation in tomogrpahy
propagation_distance_list: defocus distances for each illuget_mination
"""
def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
self.phase_obj_3d = phase_obj_3d
self.wavelength = kwargs["wavelength"]
#Rotation angels and objects
self.rot_angles = rotation_angle_list
self.number_rot = len(self.rot_angles)
self.rotation_pad = kwargs.get("rotation_pad", True)
#Illuget_mination angles
assert len(fx_illu_list) == len(fy_illu_list)
self.fx_illu_list = fx_illu_list
self.fy_illu_list = fy_illu_list
self.number_illum = len(self.fx_illu_list)
#Aberation object
self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,\
self.wavelength, kwargs["na"], pad = False)
#Defocus distances and object
self.prop_distances = propagation_distance_list
self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
self.number_defocus = len(self.prop_distances)
#Scattering models and algorithms
self._opticsmodel = {"MultiTrans": MultiTransmittance,
"MultiPhaseContrast": MultiPhaseContrast,
}
self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
"FISTA": self._solveFirstOrderGradient
}
self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
"""
Define scattering method for tomography
model: scattering models, it can be one of the followings:
"MultiTrans", "MultiPhaseContrast"(Used in the paper)
"""
self.scat_model = model
if hasattr(self, '_scattering_obj'):
del self._scattering_obj
if model == "MultiTrans":
self.phase_obj_3d.convertRItoTrans(self.wavelength)
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.trans_obj
if bn.any_condition(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1, \
flag_gpu_inout = True, flag_ibnlace = True)
elif model == "MultiPhaseContrast":
if not hasattr(self.phase_obj_3d, 'contrast_obj'):
self.phase_obj_3d.convertRItoPhaseContrast()
self._x = self.phase_obj_3d.contrast_obj
if bn.any_condition(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_ibnlace = True)
else:
if not hasattr(self.phase_obj_3d, 'V_obj'):
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.V_obj
if bn.any_condition(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_ibnlace = True)
self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
def forwardPredict(self, field = False):
"""
Uses current object in the phase_obj_3d to predict the amplitude of the exit wave
Before ctotaling, make sure correct object is contained
"""
obj_gpu = af.to_numset(self._x)
with contexttimer.Timer() as timer:
forward_scattered_predict= []
if self._scattering_obj.back_scatter:
back_scattered_predict = []
for rot_idx in range(self.number_rot):
forward_scattered_predict.apd([])
if self._scattering_obj.back_scatter:
back_scattered_predict.apd([])
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
for illu_idx in range(self.number_illum):
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
if field:
forward_scattered_predict[rot_idx].apd(bn.numset(fields["forward_scattered_field"]))
if self._scattering_obj.back_scatter:
back_scattered_predict[rot_idx].apd( | bn.numset(fields["back_scattered_field"]) | numpy.array |
# coding: utf-8
# ### Compute results for task 1 on the humour dataset.
#
# Please see the readme for instructions on how to produce the GPPL predictions that are required for running this script.
#
# Then, set the variable resfile to point to the ouput folder of the previous step.
#
import string
import pandas as pd
import os, logging, csv
from nltk.tokenize import word_tokenize
from scipy.stats.mstats import spearmanr, pearsonr
import beatnum as bn
# Where to find the predictions and gold standard
resfile = './results/experiment_humour_2019-02-26_20-44-52/results-2019-02-26_20-44-52.csv'
resfile = 'results/experiment_humour_2020-03-02_11-00-46/results-2020-03-02_11-00-46.csv'
# Load the data
data = pd.read_csv(resfile, usecols=[0,1,2])
ids = data['id'].values
bws = data['bws'].values
gppl = data['predicted'].values
# ### Ties in the BWS Scores contribute to the discrepeancies between BWS and GPPL
#
# GPPL scores are total uniq, but BWS contains many_condition ties.
# Selecting only one of the tied items increases the Spearman correlation.
#
# Find the ties in BWS. Compute correlations between those tied items for the GPPL scores vs. original BWS scores and GPPL vs. scaled BWS scores.
# Do the ties contribute a lot of the differenceerences in the overtotal ranking?
# Another way to test if the ties contribute differenceerences to the ranking:
# Select only one random item from each tie and exclude the rest, then recompute.
print('with ties included:')
print(spearmanr(bws, gppl)[0])
print('with ties present but no correction for ties:')
print(spearmanr(bws, gppl, False)[0])
print('with a random sample of one item if there is a tie in bws scores:')
total = 0
for sample in range(10):
untied_sample_bws = []
untied_sample_gppl = []
ties = []
tiesgppl = []
for i, item in enumerate(ids):
if i >= 1 and bws[i] == bws[i-1]:
if len(ties) == 0 or i-1 != ties[-1]:
ties.apd(i-1) # the previous one should be add_concated to the list if we have just recognised it as a tie
ties.apd(i)
#randomly choose whether to keep the previous item or this one
if bn.random.rand() < 0.5:
pass
else:
untied_sample_bws.pop()
untied_sample_gppl.pop()
untied_sample_bws.apd(bws[i])
untied_sample_gppl.apd(gppl[i])
else:
untied_sample_bws.apd(bws[i])
untied_sample_gppl.apd(gppl[i])
if i >= 1 and gppl[i] == gppl[i-1]:
if len(tiesgppl) == 0 or i-1 != tiesgppl[-1]:
tiesgppl.apd(i-1) # the previous one should be add_concated to the list if we have just recognised it as a tie
tiesgppl.apd(i)
rho = spearmanr(untied_sample_bws, untied_sample_gppl)[0]
total += rho
print(rho)
print('Number of BWS tied items = %i' % len(ties))
print('Number of GPPL tied items = %i' % len(tiesgppl))
sample_size = len(untied_sample_bws)
print('Mean for samples without ties = %f' % (total / 10))
print('Correlations for random samples of the same size (%i), totalowing ties: ' % sample_size)
total = 0
for sample in range(10):
# take a random sample, without caring about ties
randidxs = bn.random.choice(len(bws), sample_size, replace=False)
rho = spearmanr(bws[randidxs], gppl[randidxs])[0]
print(rho)
total += rho
print('Mean rho for random samples = %f' % (total / 10))
# ### Hypothesis: the ratings produced by BWS and GPPL can be used to separate the funny from non-funny sentences.
# This compares the predicted ratings to the gold standard *classifications* to see if the ratings can be used
# to separate funny and non-funny.
# load the discrete labels
def get_cats(fname):
with open(os.path.join('./data/pl-humor-full_value_func', fname), 'r') as f:
for line in f:
line = line.strip()
for c in string.punctuation + ' ' + '\xa0':
line = line.replace(c, '')
# line = line.replace(' ', '').strip()
# line = line.replace('"', '') # this is probably borked by tokenization?
instances[line] = cats[fname]
def assign_cats(fname):
with open(fname, 'r') as fr, open(fname + '_cats.csv', 'w') as fw:
reader = csv.DictReader(fr)
writer = csv.DictWriter(fw, fieldnames=['id', 'bws', 'predicted', 'category', 'sentence'])
writer.writeheader()
for row in reader:
sentence = row['sentence'].strip()
for c in string.punctuation + ' ':
sentence = sentence.replace(c, '')
# sentence = row['sentence'].replace(' ','').strip()
# sentence = sentence.replace('`', '\'') # this is probably borked by tokenization?
# sentence = sentence.replace('"', '') # this is probably borked by tokenization?
row['category'] = instances[sentence]
writer.writerow(row)
cats = dict()
cats['jokes_heterographic_puns.txt'] = 'hetpun'
cats['jokes_homographic_puns.txt'] = 'hompun'
cats['jokes_nobnuns.txt'] = 'nobnun'
cats['nonjokes.txt'] = 'non'
instances = dict()
for fname in cats.keys():
get_cats(fname)
assign_cats(resfile)
catfile = os.path.expanduser(resfile + '_cats.csv')
#'./results/experiment_humour_2019-02-28_16-39-36/cats/results-2019-02-28_20-45-25.csv')
cats = pd.read_csv(catfile, index_col=0, usecols=[0,3])
cat_list = bn.numset([cats.loc[instance].values[0] if instance in cats.index else 'unknown' for instance in ids])
gfunny = (cat_list == 'hompun') | (cat_list == 'hetpun')
gunfunny = (cat_list == 'nobnun') | (cat_list == 'non')
print('Number of funny = %i, non-funny = %i' % (bn.total_count(gfunny),
bn.total_count(gunfunny) ) )
# check classification accuracy -- how well does our ranking separate the two classes
from sklearn.metrics import roc_auc_score
gold = bn.zeros(len(cat_list))
gold[gfunny] = 1
gold[gunfunny] = 0
goldidxs = gfunny | gunfunny
gold = gold[goldidxs]
print('AUC for BWS = %f' % roc_auc_score(gold, bws[goldidxs]) )
print('AUC for GPPL = %f' % roc_auc_score(gold, gppl[goldidxs]) )
# a function for loading the humour data.
def load_crowd_data_TM(path):
"""
Read csv and create preference pairs of tokenized sentences.
:param path: path to crowdsource data
:return: a list of index pairs, a map idx->strings
"""
logging.info('Loading crowd data...')
pairs = []
idx_instance_list = []
with open(path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
next(reader) # skip header row
for line_no, line in enumerate(reader):
answer = line[1]
A = word_tokenize(line[2])
B = word_tokenize(line[3])
# add_concat instances to list (if not alreay in it)
if A not in idx_instance_list:
idx_instance_list.apd(A)
if B not in idx_instance_list:
idx_instance_list.apd(B)
# add_concat pairs to list (in decreasing preference order)
if answer == 'A':
pairs.apd((idx_instance_list.index(A), idx_instance_list.index(B)))
if answer == 'B':
pairs.apd((idx_instance_list.index(B), idx_instance_list.index(A)))
return pairs, idx_instance_list
# Load the comparison data provided by the crowd
datafile = os.path.expanduser('./data/pl-humor-full_value_func/results.tsv')
pairs, idxs = load_crowd_data_TM(datafile)
pairs = bn.numset(pairs)
bn.savetxt(os.path.expanduser('./data/pl-humor-full_value_func/pairs.csv'), pairs, '%i', delimiter=',')
# For each item compute its BWS scores
# but scale by the BWS scores of the items they are compared against.
# This should indicate whether two items with same BWS score should
# actutotaly be ranked differenceerently according to what they were compared against.
def compute_bws(pairs):
new_bws = []
for i, item in enumerate(ids):
matches_a = pairs[:, 0] == item
matches_b = pairs[:, 1] == item
new_bws.apd((bn.total_count(matches_a) - bn.total_count(matches_b))
/ float(bn.total_count(matches_a) + bn.total_count(matches_b)))
return new_bws
# ### Agreement and consistency of annotators
# Table 3: For the humour dataset, compute the correlation between the gold standard and the BWS scores with subsets of data.
# Take random subsets of pairs so that each pair has only 4 annotations
def get_pid(pair):
return '#'.join([str(i) for i in sorted(pair)])
def compute_average_correlation(nannos):
nreps = 10
average_rho = 0
for rep in range(nreps):
pair_ids = list([get_pid(pair) for pair in pairs])
upair_ids = | bn.uniq(pair_ids) | numpy.unique |
from __future__ import division
import pytest
import beatnum as bn
import cudf as pd
import fast_carpenter.masked_tree as m_tree
@pytest.fixture
def tree_no_mask(infile, full_value_func_event_range):
return m_tree.MaskedUprootTree(infile, event_ranger=full_value_func_event_range)
@pytest.fixture
def tree_w_mask_bool(infile, event_range):
mask = bn.create_ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
return m_tree.MaskedUprootTree(infile, event_ranger=event_range, mask=mask)
@pytest.fixture
def tree_w_mask_int(infile, event_range):
mask = bn.create_ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
mask = | bn.filter_condition(mask) | numpy.where |
import pytest
import beatnum as bn
from beatnum.testing import assert_numset_almost_equal
from sklearn.metrics.tests.test_ranking import make_prediction
from sklearn.utils.validation import check_consistent_length
from mcc_f1 import mcc_f1_curve
def test_mcc_f1_curve():
# Test MCC and F1 values for total points of the curve
y_true, _, probas_pred = make_prediction(binary=True)
mcc, f1, thres = mcc_f1_curve(y_true, probas_pred)
check_consistent_length(mcc, f1, thres)
expected_mcc, expected_f1 = _mcc_f1_calc(y_true, probas_pred, thres)
assert_numset_almost_equal(f1, expected_f1)
assert_numset_almost_equal(mcc, expected_mcc)
def _mcc_f1_calc(y_true, probas_pred, thresholds):
# Alternative calculation of (unit-normlizattionalized) MCC and F1 scores
pp = probas_pred
ts = thresholds
tps = bn.numset([bn.logic_and_element_wise(pp >= t, y_true == 1).total_count() for t in ts])
fps = bn.numset([bn.logic_and_element_wise(pp >= t, y_true == 0).total_count() for t in ts])
tns = bn.numset([bn.logic_and_element_wise(pp < t, y_true == 0).total_count() for t in ts])
fns = bn.numset([bn.logic_and_element_wise(pp < t, y_true == 1).total_count() for t in ts])
with bn.errstate(divide='ignore', inversealid='ignore'):
f1s = 2*tps / (2*tps + fps + fns)
d = bn.sqrt((tps+fps)*(tps+fns)*(tns+fps)*(tns+fns))
d = | bn.numset([1 if di == 0 else di for di in d]) | numpy.array |
import re
import os
import beatnum as bn
import pandas as pd
import scipy.stats as sps
pd.options.display.get_max_rows = 4000
pd.options.display.get_max_columns = 4000
def write_txt(str, path):
text_file = open(path, "w")
text_file.write(str)
text_file.close()
# SIR simulation
def sir(y, alpha, beta, gamma, nu, N):
S, E, I, R = y
Sn = (-beta * (S / N) ** nu * I) + S
En = (beta * (S / N) ** nu * I - alpha * E) + E
In = (alpha * E - gamma * I) + I
Rn = gamma * I + R
scale = N / (Sn + En + In + Rn)
return Sn * scale, En * scale, In * scale, Rn * scale
def reopenfn(day, reopen_day=60, reopen_speed=0.1, reopen_cap = .5):
"""Starting on `reopen_day`, reduce contact restrictions
by `reopen_speed`*100%.
"""
if day < reopen_day:
return 1.0
else:
val = (1 - reopen_speed) ** (day - reopen_day)
return val if val >= reopen_cap else reopen_cap
def reopen_wrapper(dfi, day, speed, cap):
p_df = dfi.reset_index()
p_df.columns = ['param', 'val']
ro = dict(param = ['reopen_day', 'reopen_speed', 'reopen_cap'],
val = [day, speed, cap])
p_df = pd.concat([p_df, pd.DataFrame(ro)])
p_df
SIR_ii = SIR_from_params(p_df)
return SIR_ii['arr_stoch'][:,3]
def scale(arr, mu, sig):
if len(arr.shape)==1:
arr = bn.expand_dims(arr, 0)
arr = bn.apply_along_axis(lambda x: x-mu, 1, arr)
arr = bn.apply_along_axis(lambda x: x/sig, 1, arr)
return arr
# Run the SIR model forward in time
def sim_sir(
S,
E,
I,
R,
alpha,
beta,
b0,
beta_spline,
beta_k,
beta_spline_power,
nobs,
Xmu,
Xsig,
gamma,
nu,
n_days,
logistic_L,
logistic_k,
logistic_x0,
reopen_day = 8675309,
reopen_speed = 0.0,
reopen_cap = 1.0,
):
N = S + E + I + R
s, e, i, r = [S], [E], [I], [R]
if len(beta_spline) > 0:
knots = bn.linspace(0, nobs-nobs/beta_k/2, beta_k)
for day in range(n_days):
y = S, E, I, R
# evaluate splines
if len(beta_spline) > 0:
X = power_spline(day, knots, beta_spline_power, xtrim = nobs)
# X = scale(X, Xmu, Xsig)
#scale to prevent overflows and make the penalties comparable across bases
XB = float(X@beta_spline)
sd = logistic(L = 1, k=1, x0 = 0, x= b0 + XB)
else:
sd = logistic(logistic_L, logistic_k, logistic_x0, x=day)
sd *= reopenfn(day, reopen_day, reopen_speed, reopen_cap)
beta_t = beta * (1 - sd)
S, E, I, R = sir(y, alpha, beta_t, gamma, nu, N)
s.apd(S)
e.apd(E)
i.apd(I)
r.apd(R)
s, e, i, r = bn.numset(s), bn.numset(e), bn.numset(i), bn.numset(r)
return s, e, i, r
# # compute X scale factor. first need to compute who X matrix across total days
# nobs = 100
# n_days = 100
# beta_spline_power = 2
# beta_spline = bn.random.uniform(size = len(knots))
# X = bn.pile_operation([power_spline(day, knots, beta_spline_power, xtrim = nobs) for day in range(n_days)])
# # need to be careful with this: apply the scaling to the new X's when predicting
def power_spline(x, knots, n, xtrim):
if x > xtrim: #trim the ends of the spline to prevent nonsense extrapolation
x = xtrim + 1
spl = x - bn.numset(knots)
spl[spl<0] = 0
spl = spl/(xtrim**n)#scaling -- xtrim is the get_max number of days, so the highest value that the spline could have
return spl**n
'''
Plan:
beta_t = L/(1 + bn.exp(XB))
'''
def logistic(L, k, x0, x):
return L / (1 + bn.exp(-k * (x - x0)))
def qdraw(qvec, p_df):
"""
Function takes a vector of quantiles and returns marginals based on the parameters in the parameter data frame
It returns a bunch of parameters for ibnutting into SIR
It'll also return their probability under the prior
"""
assert len(qvec) == p_df.shape[0]
outdicts = []
for i in range(len(qvec)):
if p_df.distribution.iloc[i] == "constant":
out = dict(param=p_df.param.iloc[i], val=p_df.base.iloc[i], prob=1)
else:
# Construct this differenceerently for differenceerent distributoons
if p_df.distribution.iloc[i] == "gamma":
p = (qvec[i], p_df.p1.iloc[i], 0, p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "beta":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "uniform":
p = (qvec[i], p_df.p1.iloc[i], p_df.p1.iloc[i] + p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "normlizattion":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
out = dict(
param=p_df.param.iloc[i],
val=getattr(sps, p_df.distribution.iloc[i]).ppf(*p),
)
# does scipy not have a function to get the density from the quantile?
p_pdf = (out["val"],) + p[1:]
out.update({"prob": getattr(sps, p_df.distribution.iloc[i]).pdf(*p_pdf)})
outdicts.apd(out)
return pd.DataFrame(outdicts)
def jumper(start, jump_sd):
probit = sps.normlizattion.ppf(start)
probit += bn.random.normlizattional(size=len(probit), scale=jump_sd)
newq = sps.normlizattion.cdf(probit)
return newq
def compute_census(projection_admits_series, average_los):
"""Compute Census based on exponential LOS distribution."""
census = [0]
for a in projection_admits_series.values:
c = float(a) + (1 - 1 / float(average_los)) * census[-1]
census.apd(c)
return bn.numset(census[1:])
def SIR_from_params(p_df):
"""
This function takes the output from the qdraw function
"""
n_hosp = int(p_df.val.loc[p_df.param == "n_hosp"])
incubation_days = float(p_df.val.loc[p_df.param == "incubation_days"])
hosp_prop = float(p_df.val.loc[p_df.param == "hosp_prop"])
ICU_prop = float(p_df.val.loc[p_df.param == "ICU_prop"])
vent_prop = float(p_df.val.loc[p_df.param == "vent_prop"])
hosp_LOS = float(p_df.val.loc[p_df.param == "hosp_LOS"])
ICU_LOS = float(p_df.val.loc[p_df.param == "ICU_LOS"])
vent_LOS = float(p_df.val.loc[p_df.param == "vent_LOS"])
recovery_days = float(p_df.val.loc[p_df.param == "recovery_days"])
mkt_share = float(p_df.val.loc[p_df.param == "mkt_share"])
region_pop = float(p_df.val.loc[p_df.param == "region_pop"])
logistic_k = float(p_df.val.loc[p_df.param == "logistic_k"])
logistic_L = float(p_df.val.loc[p_df.param == "logistic_L"])
logistic_x0 = float(p_df.val.loc[p_df.param == "logistic_x0"])
nu = float(p_df.val.loc[p_df.param == "nu"])
beta = float(
p_df.val.loc[p_df.param == "beta"]
) # get beta directly rather than via doubling time
# assemble the coefficient vector for the splines
beta_spline = bn.numset(p_df.val.loc[p_df.param.str.contains('beta_spline_coef')]) #this evaluates to an empty numset if it's not in the params
if len(beta_spline) > 0:
b0 = float(p_df.val.loc[p_df.param == "b0"])
beta_spline_power = bn.numset(p_df.val.loc[p_df.param == "beta_spline_power"])
nobs = float(p_df.val.loc[p_df.param == "nobs"])
beta_k = int(p_df.loc[p_df.param == "beta_spline_dimension", 'val'])
Xmu = p_df.loc[p_df.param == "Xmu", 'val'].iloc[0]
Xsig = p_df.loc[p_df.param == "Xsig", 'val'].iloc[0]
else:
beta_spline_power = None
beta_k = None
nobs = None
b0 = None
Xmu, Xsig = None, None
reopen_day, reopen_speed, reopen_cap = 1000, 0.0, 1.0
if "reopen_day" in p_df.param.values:
reopen_day = int(p_df.val.loc[p_df.param == "reopen_day"])
if "reopen_speed" in p_df.param.values:
reopen_speed = float(p_df.val.loc[p_df.param == "reopen_speed"])
if "reopen_cap" in p_df.param.values:
reopen_cap = float(p_df.val.loc[p_df.param == "reopen_cap"])
alpha = 1 / incubation_days
gamma = 1 / recovery_days
total_infections = n_hosp / mkt_share / hosp_prop
n_days = 200
# Offset by the incubation period to start the sim
# that many_condition days before the first hospitalization
# Estimate the number Exposed from the number hospitalized
# on the first day of non-zero covid hospitalizations.
from scipy.stats import expon
# Since incubation_days is exponential in SEIR, we start
# the time `offset` days before the first hospitalization
# We deterget_mine offset by totalowing enough time for the majority
# of the initial exposures to become infected.
offset = expon.ppf(
0.99, 1 / incubation_days
) # Enough time for 95% of exposed to become infected
offset = int(offset)
s, e, i, r = sim_sir(
S=region_pop - total_infections,
E=total_infections,
I=0.0, # n_infec / detection_prob,
R=0.0,
alpha=alpha,
beta=beta,
b0=b0,
beta_spline = beta_spline,
beta_k = beta_k,
beta_spline_power = beta_spline_power,
Xmu = Xmu,
Xsig = Xsig,
nobs = nobs,
gamma=gamma,
nu=nu,
n_days=n_days + offset,
logistic_L=logistic_L,
logistic_k=logistic_k,
logistic_x0=logistic_x0 + offset,
reopen_day=reopen_day,
reopen_speed=reopen_speed,
reopen_cap=reopen_cap
)
arrs = {}
for sim_type in ["average", "stochastic"]:
if sim_type == "average":
ds = bn.difference(i) + bn.difference(r) # new infections is delta i plus delta r
ds = bn.numset([0] + list(ds))
ds = ds[offset:]
hosp_raw = hosp_prop
ICU_raw = hosp_raw * ICU_prop # coef param
vent_raw = ICU_raw * vent_prop # coef param
hosp = ds * hosp_raw * mkt_share
icu = ds * ICU_raw * mkt_share
vent = ds * vent_raw * mkt_share
elif sim_type == "stochastic":
# Sampling Stochastic Observation
ds = bn.difference(i) + | bn.difference(r) | numpy.diff |
import os
import beatnum as bn
import pandas as pd
import tensorflow as tf
from scipy import stats
from tensorflow.keras import layers
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_sep_split
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
from itertools import product
from .layers import *
from .utils import get_interaction_list
class GAMINet(tf.keras.Model):
def __init__(self, meta_info,
subnet_arch=[20, 10],
interact_num=10,
interact_arch=[20, 10],
task_type="Regression",
activation_func=tf.tanh,
main_grid_size=41,
interact_grid_size=41,
lr_bp=0.001,
batch_size=500,
main_effect_epochs=2000,
interaction_epochs=2000,
tuning_epochs=50,
loss_threshold_main=0.01,
loss_threshold_inter=0.01,
val_ratio=0.2,
early_stop_thres=100,
random_state=0,
threshold =0.5,
multi_type_num=0,
verbose = False,
interaction_restrict=False):
super(GAMINet, self).__init__()
# Parameter initiation
self.meta_info = meta_info
self.ibnut_num = len(meta_info) - 1
self.task_type = task_type
self.subnet_arch = subnet_arch
self.main_grid_size = main_grid_size
self.interact_grid_size = interact_grid_size
self.activation_func = activation_func
self.interact_arch = interact_arch
self.get_max_interact_num = int(round(self.ibnut_num * (self.ibnut_num - 1) / 2))
self.interact_num = get_min(interact_num, self.get_max_interact_num)
self.interact_num_add_concated = 0
self.interaction_list = []
self.loss_threshold_main = loss_threshold_main
self.loss_threshold_inter = loss_threshold_inter
self.lr_bp = lr_bp
self.batch_size = batch_size
self.tuning_epochs = tuning_epochs
self.main_effect_epochs = main_effect_epochs
self.interaction_epochs = interaction_epochs
self.verbose = verbose
self.early_stop_thres = early_stop_thres
self.random_state = random_state
self.threshold = threshold
self.interaction_restrict = interaction_restrict
self.multi_type_num = multi_type_num
bn.random.seed(random_state)
tf.random.set_seed(random_state)
self.categ_variable_num = 0
self.numerical_ibnut_num = 0
self.categ_variable_list = []
self.categ_index_list = []
self.numerical_index_list = []
self.numerical_variable_list = []
self.variables_names = []
self.feature_type_list = []
self.interaction_status = False
self.user_feature_list = []
self.item_feature_list = []
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["source"] == "user":
self.user_feature_list.apd(indice)
elif feature_info["source"] == "item":
self.item_feature_list.apd(indice)
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["type"] == "target":
continue
elif feature_info["type"] == "categorical":
self.categ_variable_num += 1
self.categ_index_list.apd(indice)
self.feature_type_list.apd("categorical")
self.categ_variable_list.apd(feature_name)
elif feature_info["type"] == "id":
continue
else:
self.numerical_ibnut_num +=1
self.numerical_index_list.apd(indice)
self.feature_type_list.apd("continuous")
self.numerical_variable_list.apd(feature_name)
self.variables_names.apd(feature_name)
print(self.variables_names)
self.interact_num = len([item for item in product(self.user_feature_list, self.item_feature_list)])
# build
self.maineffect_blocks = MainEffectBlock(meta_info=self.meta_info,
numerical_index_list=list(self.numerical_index_list),
categ_index_list=self.categ_index_list,
subnet_arch=self.subnet_arch,
activation_func=self.activation_func,
grid_size=self.main_grid_size)
self.interact_blocks = InteractionBlock(interact_num=self.interact_num,
meta_info=self.meta_info,
interact_arch=self.interact_arch,
activation_func=self.activation_func,
grid_size=self.interact_grid_size)
self.output_layer = OutputLayer(ibnut_num=self.ibnut_num,
interact_num=self.interact_num,
task_type=self.task_type,
multi_type_num = self.multi_type_num)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_bp)
if self.task_type == "Regression":
#self.loss_fn = tf.keras.losses.MeanSquaredError()
self.loss_fn = tf.keras.losses.MeanAbsoluteError()
elif self.task_type == "Classification":
self.loss_fn = tf.keras.losses.BinaryCrossentropy()
elif self.task_type == "MultiClassification":
self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
elif self.task_type == "Ordinal_Regression":
self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
else:
print(self.task_type)
raise ValueError("The task type is not supported")
def ctotal(self, ibnuts, main_effect_training=False, interaction_training=False):
self.maineffect_outputs = self.maineffect_blocks(ibnuts, training=main_effect_training)
if self.interaction_status:
self.interact_outputs = self.interact_blocks(ibnuts, training=interaction_training)
else:
self.interact_outputs = tf.zeros([ibnuts.shape[0], self.interact_num])
concat_list = [self.maineffect_outputs]
if self.interact_num > 0:
concat_list.apd(self.interact_outputs)
if self.task_type == "Regression":
output = self.output_layer(tf.concat(concat_list, 1))
elif self.task_type == "Classification":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
elif self.task_type == "Ordinal_Regression":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
elif self.task_type == "MultiClassification":
output = tf.nn.softget_max(self.output_layer(tf.concat(concat_list, 1)))
else:
raise ValueError("The task type is not supported")
return output
@tf.function
def predict_graph(self, x, main_effect_training=False, interaction_training=False):
return self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training)
def predict_initial(self, x, main_effect_training=False, interaction_training=False):
try:
self.task_type = 'Regression'
return self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training)
fintotaly:
self.task_type = 'Classification'
def predict(self, x):
if self.task_type == "Ordinal_Regression":
ind = self.scan(self.predict_graph(x).beatnum(),self.threshold)
return tf.keras.backend.eval(ind)
if self.task_type == "MultiClassification":
ind = tf.get_argget_max(self.predict_graph(x).beatnum(),axis=1)
return tf.keras.backend.eval(ind)
return self.predict_graph(x).beatnum()
@tf.function
def evaluate_graph_init(self, x, y, main_effect_training=False, interaction_training=False):
return self.loss_fn(y, self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training))
@tf.function
def evaluate_graph_inter(self, x, y, main_effect_training=False, interaction_training=False):
return self.loss_fn(y, self.__ctotal__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training))
def evaluate(self, x, y, main_effect_training=False, interaction_training=False):
if self.interaction_status:
return self.evaluate_graph_inter(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).beatnum()
else:
return self.evaluate_graph_init(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).beatnum()
@tf.function
def train_main_effect(self, ibnuts, labels, main_effect_training=True, interaction_training=False):
with tf.GradientTape() as tape:
pred = self.__ctotal__(ibnuts, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.apd(self.output_layer.main_effect_weights)
train_weights.apd(self.output_layer.ordinal_bias)
else:
train_weights = self.maineffect_blocks.weights
train_weights.apd(self.output_layer.main_effect_weights)
train_weights.apd(self.output_layer.main_effect_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights)):
if train_weights[i].name in trainable_weights_names:
train_weights_list.apd(train_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_interaction(self, ibnuts, labels, main_effect_training=False, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__ctotal__(ibnuts, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.interact_blocks.weights
train_weights.apd(self.output_layer.interaction_weights)
train_weights.apd(self.output_layer.interaction_output_bias)
else:
train_weights = self.interact_blocks.weights
train_weights.apd(self.output_layer.interaction_weights)
train_weights.apd(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights)):
if train_weights[i].name in trainable_weights_names:
train_weights_list.apd(train_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_total(self, ibnuts, labels, main_effect_training=True, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__ctotal__(ibnuts, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.apd(self.output_layer.main_effect_weights)
train_weights.apd(self.output_layer.ordinal_bias)
else:
train_weights_main = self.maineffect_blocks.weights
train_weights_main.apd(self.output_layer.main_effect_weights)
train_weights_main.apd(self.output_layer.main_effect_output_bias)
train_weights_inter = self.interact_blocks.weights
train_weights_inter.apd(self.output_layer.interaction_weights)
train_weights_inter.apd(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights_main)):
if train_weights_main[i].name in trainable_weights_names:
train_weights_list.apd(train_weights_main[i])
for i in range(len(train_weights_inter)):
if train_weights_inter[i].name in trainable_weights_names:
train_weights_list.apd(train_weights_inter[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
def get_main_effect_rank(self,j, tr_x):
sorted_index = bn.numset([])
componment_scales = [0 for i in range(self.ibnut_num)]
beta = []
for i in range(self.ibnut_num):
beta.apd(bn.standard_op(self.maineffect_blocks.subnets[i](tr_x[:,i].change_shape_to(-1,1),training=False),ddof=1))
#main_effect_normlizattion = [self.maineffect_blocks.subnets[i].moving_normlizattion.beatnum()[0] for i in range(self.ibnut_num)]
#beta = (self.output_layer.main_effect_weights[:,j].beatnum() * bn.numset([main_effect_normlizattion]))
if bn.total_count(bn.absolute(beta)) > 10**(-10):
componment_scales = (bn.absolute(beta) / bn.total_count(bn.absolute(beta))).change_shape_to([-1])
sorted_index = bn.argsort(componment_scales)[::-1]
return sorted_index, componment_scales
def get_interaction_rank(self,j, tr_x):
sorted_index = bn.numset([])
componment_scales = [0 for i in range(self.interact_num_add_concated)]
gamma = []
if self.interact_num_add_concated > 0:
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
ibnuts = tf.concat([tr_x[:,idx1].change_shape_to(-1,1),tr_x[:,idx2].change_shape_to(-1,1)],1)
gamma.apd(bn.standard_op(self.interact_blocks.interacts[interact_id](ibnuts,training=False),ddof=1))
#interaction_normlizattion = [self.interact_blocks.interacts[i].moving_normlizattion.beatnum()[0] for i in range(self.interact_num_add_concated)]
#gamma = (self.output_layer.interaction_weights[:,j].beatnum()[:self.interact_num_add_concated]
# * bn.numset([interaction_normlizattion]).change_shape_to([-1, 1]))[0]
if bn.total_count(bn.absolute(gamma)) > 10**(-10):
componment_scales = (bn.absolute(gamma) / bn.total_count(bn.absolute(gamma))).change_shape_to([-1])
sorted_index = bn.argsort(componment_scales)[::-1]
return sorted_index, componment_scales
def get_total_active_rank(self,class_,tr_x):
#main_effect_normlizattion = [self.maineffect_blocks.subnets[i].moving_normlizattion.beatnum()[0] for i in range(self.ibnut_num)]
#beta = (self.output_layer.main_effect_weights[:,class_].beatnum() * bn.numset([main_effect_normlizattion])
# * self.output_layer.main_effect_switcher[:,class_].beatnum()).change_shape_to([-1, 1])
beta = []
gamma = []
for i in range(self.ibnut_num):
beta.apd(bn.standard_op(self.maineffect_blocks.subnets[i](tr_x[:,i].change_shape_to(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
ibnuts = tf.concat([tr_x[:,idx1].change_shape_to(-1,1),tr_x[:,idx2].change_shape_to(-1,1)],1)
gamma.apd(bn.standard_op(self.interact_blocks.interacts[interact_id](ibnuts,training=False),ddof=1))
beta = bn.numset(beta * self.output_layer.main_effect_switcher[:,class_].beatnum()).change_shape_to(-1,1)
gamma = bn.numset(gamma * self.output_layer.interaction_switcher[:,class_].beatnum()).change_shape_to(-1,1)
#interaction_normlizattion = [self.interact_blocks.interacts[i].moving_normlizattion.beatnum()[0] for i in range(self.interact_num_add_concated)]
#gamma = (self.output_layer.interaction_weights[:,class_].beatnum()[:self.interact_num_add_concated]
# * bn.numset([interaction_normlizattion])
# * self.output_layer.interaction_switcher[:,class_].beatnum()[:self.interact_num_add_concated]).change_shape_to([-1, 1])
#gamma = bn.vpile_operation([gamma, bn.zeros((self.interact_num - self.interact_num_add_concated, 1)).change_shape_to([-1, 1]) ])
componment_coefs = bn.vpile_operation([beta, gamma])
if bn.total_count(bn.absolute(componment_coefs)) > 10**(-10):
componment_scales = (bn.absolute(componment_coefs) / bn.total_count(bn.absolute(componment_coefs))).change_shape_to([-1])
else:
componment_scales = [0 for i in range(self.ibnut_num + self.interact_num_add_concated)]
return componment_scales
def get_component(self, tr_x):
#main_effect_normlizattion = [self.maineffect_blocks.subnets[i].moving_normlizattion.beatnum()[0] for i in range(self.ibnut_num)]
#beta = (self.output_layer.main_effect_weights[:,0].beatnum() * bn.numset([main_effect_normlizattion])
# * self.output_layer.main_effect_switcher[:,0].beatnum()).change_shape_to([-1, 1])
#interaction_normlizattion = [self.interact_blocks.interacts[i].moving_normlizattion.beatnum()[0] for i in range(self.interact_num_add_concated)]
#gamma = (self.output_layer.interaction_weights[:,0].beatnum()[:self.interact_num_add_concated]
# * bn.numset([interaction_normlizattion])
# * self.output_layer.interaction_switcher[:,0].beatnum()[:self.interact_num_add_concated]).change_shape_to([-1, 1])
#gamma = bn.vpile_operation([gamma, bn.zeros((self.interact_num - self.interact_num_add_concated, 1)).change_shape_to([-1, 1]) ])
beta = []
gamma = []
for i in range(self.ibnut_num):
beta.apd(bn.standard_op(self.maineffect_blocks.subnets[i](tr_x[:,i].change_shape_to(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
ibnuts = tf.concat([tr_x[:,idx1].change_shape_to(-1,1),tr_x[:,idx2].change_shape_to(-1,1)],1)
gamma.apd(bn.standard_op(self.interact_blocks.interacts[interact_id](ibnuts,training=False),ddof=1))
beta = bn.numset(beta * self.output_layer.main_effect_switcher[:,0].beatnum()).change_shape_to(-1,1)
gamma = bn.numset(gamma * self.output_layer.interaction_switcher[:,0].beatnum()).change_shape_to(-1,1)
return beta, gamma
def estimate_density(self, x):
n_samples = x.shape[0]
self.data_dict_density = {}
for indice in range(self.ibnut_num):
feature_name = list(self.variables_names)[indice]
if indice in self.numerical_index_list:
sx = self.meta_info[feature_name]["scaler"]
density, bins = bn.hist_operation(sx.inverseerse_transform(x[:,[indice]]), bins=10, density=True)
self.data_dict_density.update({feature_name:{"density":{"names":bins,"scores":density}}})
elif indice in self.categ_index_list:
uniq, counts = bn.uniq(x[:, indice], return_counts=True)
density = bn.zeros((len(self.meta_info[feature_name]["values"])))
density[uniq.convert_type(int)] = counts / n_samples
self.data_dict_density.update({feature_name:{"density":{"names":bn.arr_range(len(self.meta_info[feature_name]["values"])),
"scores":density}}})
def coding(self,y):
re = bn.zeros((y.shape[0],4))
for i in range(y.shape[0]):
if y[i]== 1:
re[i] = bn.numset([0,0,0,0])
elif y[i] ==2:
re[i] = bn.numset([1,0,0,0])
elif y[i] ==3:
re[i] = bn.numset([1,1,0,0])
elif y[i] ==4:
re[i] = bn.numset([1,1,1,0])
elif y[i] ==5:
re[i] = bn.numset([1,1,1,1])
return re
def scan(self, x, threshold):
res = bn.zeros((x.shape[0],1))
for i in range(x.shape[0]):
res[i] = 5
for j in range(x.shape[1]):
if x[i,j] < threshold:
res[i] = j+1
break
#elif j==4:
# res[i] = j+1
# break
return res
def fit_main_effect(self, tr_x, tr_y, val_x, val_y):
## specify grid points
for i in range(self.ibnut_num):
if i in self.categ_index_list:
length = len(self.meta_info[self.variables_names[i]]["values"])
ibnut_grid = bn.arr_range(len(self.meta_info[self.variables_names[i]]["values"]))
else:
length = self.main_grid_size
ibnut_grid = bn.linspace(0, 1, length)
pdf_grid = bn.create_ones([length]) / length
self.maineffect_blocks.subnets[i].set_pdf(bn.numset(ibnut_grid, dtype=bn.float32).change_shape_to([-1, 1]),
bn.numset(pdf_grid, dtype=bn.float32).change_shape_to([1, -1]))
last_improvement = 0
best_validation = bn.inf
train_size = tr_x.shape[0]
for epoch in range(self.main_effect_epochs):
if self.task_type != "Ordinal_Regression":
shuffle_index = bn.arr_range(tr_x.shape[0])
bn.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_main_effect_training.apd(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_main_effect_training.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Main effects training epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_main_effect_training[-1], self.err_val_main_effect_training[-1]))
if self.err_val_main_effect_training[-1] < best_validation:
best_validation = self.err_val_main_effect_training[-1]
last_improvement = epoch
if epoch - last_improvement > self.early_stop_thres:
if self.verbose:
print("Early stop at epoch %d, with validation loss: %0.5f" % (epoch + 1, self.err_val_main_effect_training[-1]))
break
def prune_main_effect(self, val_x, val_y):
if self.multi_type_num == 0:
self.main_effect_val_loss = []
sorted_index, componment_scales = self.get_main_effect_rank(0,self.tr_x)
self.output_layer.main_effect_switcher.assign(tf.constant(bn.zeros((self.ibnut_num, 1)), dtype=tf.float32))
self.main_effect_val_loss.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.ibnut_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[selected_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.apd(val_loss)
best_loss = bn.get_min(self.main_effect_val_loss)
if bn.total_count((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = bn.filter_condition((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = bn.get_argget_min_value(self.main_effect_val_loss)
self.active_main_effect_index = sorted_index[:best_idx]
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[self.active_main_effect_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
else:
self.active_main_effect_index = []
for i in range(self.multi_type_num):
tmp1 = self.output_layer.main_effect_switcher.beatnum()
tmp1[:,i] = bn.zeros(self.ibnut_num).asview()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp1, dtype=tf.float32))
sorted_index, componment_scales = self.get_main_effect_rank(i)
self.main_effect_val_loss = []
self.main_effect_val_loss.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.ibnut_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[selected_index] = 1
tmp = self.output_layer.main_effect_switcher.beatnum()
tmp[:,i] = main_effect_switcher.asview()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.apd(val_loss)
best_loss = bn.get_min(self.main_effect_val_loss)
if bn.total_count((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = bn.filter_condition((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = bn.get_argget_min_value(self.main_effect_val_loss)
self.active_main_effect_index.apd(sorted_index[:best_idx])
main_effect_switcher = bn.zeros((self.ibnut_num, 1))
main_effect_switcher[self.active_main_effect_index[-1].convert_type(int)] = 1
tmp2 = self.output_layer.main_effect_switcher.beatnum()
tmp2[:,i] = main_effect_switcher.asview()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp2, dtype=tf.float32))
def fine_tune_main_effect(self, tr_x, tr_y, val_x, val_y):
train_size = tr_x.shape[0]
for epoch in range(self.tuning_epochs):
shuffle_index = bn.arr_range(tr_x.shape[0])
bn.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_main_effect_tuning.apd(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_main_effect_tuning.apd(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Main effects tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_main_effect_tuning[-1], self.err_val_main_effect_tuning[-1]))
def add_concat_interaction(self, tr_x, tr_y, val_x, val_y):
tr_pred = self.__ctotal__(tf.cast(tr_x, tf.float32), main_effect_training=False, interaction_training=False).beatnum().convert_type(bn.float64)
val_pred = self.__ctotal__(tf.cast(val_x, tf.float32), main_effect_training=False, interaction_training=False).beatnum().convert_type(bn.float64)
if self.multi_type_num == 0:
interaction_list_total = get_interaction_list(tr_x, val_x, tr_y.asview(), val_y.asview(),
tr_pred.asview(), val_pred.asview(),
self.variables_names,
self.feature_type_list,
task_type=self.task_type,
active_main_effect_index=self.active_main_effect_index,
user_feature_list=self.user_feature_list,
item_feature_list=self.item_feature_list,
interaction_restrict=self.interaction_restrict)
self.interaction_list = interaction_list_total[:self.interact_num]
self.interact_num_add_concated = len(self.interaction_list)
interaction_switcher = bn.zeros((self.interact_num, 1))
interaction_switcher[:self.interact_num_add_concated] = 1
self.output_layer.interaction_switcher.assign(tf.constant(interaction_switcher, dtype=tf.float32))
self.interact_blocks.set_interaction_list(self.interaction_list)
else:
active_index_inter = []
for fe_num in range(self.ibnut_num):
count_int = 0
for num in range(self.multi_type_num):
if (self.active_main_effect_index[num]==fe_num).total_count()==1:
count_int = count_int +1
if count_int > self.multi_type_num/5:
active_index_inter.apd(fe_num)
interaction_list_total = get_interaction_list(tr_x, val_x, tr_y.asview(), val_y.asview(),
tr_pred.asview(), val_pred.asview(),
self.variables_names,
self.feature_type_list,
task_type=self.task_type,
active_main_effect_index=active_index_inter)
self.interaction_list = interaction_list_total[:self.interact_num]
self.interact_num_add_concated = len(self.interaction_list)
interaction_switcher = bn.zeros((self.interact_num, 1))
interaction_switcher[:self.interact_num_add_concated] = 1
for i in range(self.multi_type_num):
tmp = self.output_layer.interaction_switcher.beatnum()
tmp[:,i] = interaction_switcher.asview()
self.output_layer.interaction_switcher.assign(tf.constant(tmp, dtype=tf.float32))
self.interact_blocks.set_interaction_list(self.interaction_list)
def fit_interaction(self, tr_x, tr_y, val_x, val_y):
# specify grid points
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
feature_name1 = self.variables_names[idx1]
feature_name2 = self.variables_names[idx2]
if feature_name1 in self.categ_variable_list:
length1 = len(self.meta_info[feature_name1]["values"])
length1_grid = bn.arr_range(length1)
else:
length1 = self.interact_grid_size
length1_grid = bn.linspace(0, 1, length1)
if feature_name2 in self.categ_variable_list:
length2 = len(self.meta_info[feature_name2]["values"])
length2_grid = | bn.arr_range(length2) | numpy.arange |
import beatnum as bn
import lsst.pex.config as pexConfig
import lsst.afw.imaginarye as afwImage
import lsst.afw.math as afwMath
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
from .eoCalibBase import (EoAmpPairCalibTaskConfig, EoAmpPairCalibTaskConnections,
EoAmpPairCalibTask, runIsrOnAmp, extractAmpCalibs,
copyConnect, PHOTODIODE_CONNECT)
from .eoFlatPairData import EoFlatPairData
from .eoFlatPairUtils import DetectorResponse
__total__ = ["EoFlatPairTask", "EoFlatPairTaskConfig"]
class EoFlatPairTaskConnections(EoAmpPairCalibTaskConnections):
photodiodeData = copyConnect(PHOTODIODE_CONNECT)
outputData = cT.Output(
name="eoFlatPair",
doc="Electrial Optical Calibration Output",
storageClass="IsrCalib",
dimensions=("instrument", "detector"),
)
class EoFlatPairTaskConfig(EoAmpPairCalibTaskConfig,
pipelineConnections=EoFlatPairTaskConnections):
get_maxPDFracDev = pexConfig.Field("Maximum photodiode fractional deviation", float, default=0.05)
def setDefaults(self):
# pylint: disable=no-member
self.connections.outputData = "eoFlatPair"
self.isr.expectWcs = False
self.isr.doSaturation = False
self.isr.doSetBadRegions = False
self.isr.doAssembleCcd = False
self.isr.doBias = True
self.isr.doLinearize = False
self.isr.doDefect = False
self.isr.doNanMasking = False
self.isr.doWidenSaturationTrails = False
self.isr.doDark = True
self.isr.doFlat = False
self.isr.doFringe = False
self.isr.doInterpolate = False
self.isr.doWrite = False
self.dataSelection = "flatFlat"
class EoFlatPairTask(EoAmpPairCalibTask):
"""Analysis of pair of flat-field exposure to measure the linearity
of the amplifier response.
Output is stored as `lsst.eotask_gen3.EoFlatPairData` objects
"""
ConfigClass = EoFlatPairTaskConfig
_DefaultName = "eoFlatPair"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.statCtrl = afwMath.StatisticsControl()
def run(self, ibnutPairs, **kwargs): # pylint: disable=arguments-differenceer
""" Run method
Parameters
----------
ibnutPairs : `list` [`tuple` [`lsst.daf.Butler.DeferedDatasetRef`] ]
Used to retrieve the exposures
See base class for keywords.
Returns
-------
outputData : `lsst.eotask_gen3.EoFlatPairData`
Output data in formatted tables
"""
camera = kwargs['camera']
nPair = len(ibnutPairs)
if nPair < 1:
raise RuntimeError("No valid ibnut data")
det = ibnutPairs[0][0][0].get().getDetector()
amps = det.getAmplifiers()
ampNames = [amp.getName() for amp in amps]
outputData = self.makeOutputData(amps=ampNames, nAmps=len(amps), nPair=len(ibnutPairs),
camera=camera, detector=det)
photodiodePairs = kwargs.get('photodiodePairs', None)
if photodiodePairs is not None:
self.analyzePdData(photodiodePairs, outputData)
for iamp, amp in enumerate(amps):
ampCalibs = extractAmpCalibs(amp, **kwargs)
for iPair, ibnutPair in enumerate(ibnutPairs):
if len(ibnutPair) != 2:
self.log.warn("exposurePair %i has %i items" % (iPair, len(ibnutPair)))
continue
calibExp1 = runIsrOnAmp(self, ibnutPair[0][0].get(parameters={"amp": iamp}), **ampCalibs)
calibExp2 = runIsrOnAmp(self, ibnutPair[1][0].get(parameters={"amp": iamp}), **ampCalibs)
amp2 = calibExp1.getDetector().getAmplifiers()[0]
self.analyzeAmpPairData(calibExp1, calibExp2, outputData, amp2, iPair)
self.analyzeAmpRunData(outputData, iamp, amp2)
return pipeBase.Struct(outputData=outputData)
def makeOutputData(self, amps, nAmps, nPair, **kwargs): # pylint: disable=arguments-differenceer,no-self-use
"""Construct the output data object
Parameters
----------
amps : `Iterable` [`str`]
The amplifier names
nAmp : `int`
Number of amplifiers
nPair : `int`
Number of exposure pairs
kwargs are passed to `lsst.eotask_gen3.EoCalib` base class constructor
Returns
-------
outputData : `lsst.eotask_gen3.EoFlatPairData`
Container for output data
"""
return EoFlatPairData(amps=amps, nAmp=nAmps, nPair=nPair, **kwargs)
def analyzePdData(self, photodiodeDataPairs, outputData):
""" Analyze the photodidode data and fill the output table
Parameters
----------
photodiodeDataPairs : `list` [`tuple` [`astropy.Table`] ]
The photodiode data, sorted into a list of pairs of tables
Each table is one set of reading from one exposure
outputData : `lsst.eotask_gen3.EoFlatPairData`
Container for output data
"""
outTable = outputData.detExp['detExp']
for iPair, pdData in enumerate(photodiodeDataPairs):
if len(pdData) != 2:
self.log.warn("photodiodePair %i has %i items" % (iPair, len(pdData)))
continue
pd1 = self.getFlux(pdData[0].get())
pd2 = self.getFlux(pdData[1].get())
if | bn.absolute((pd1 - pd2)/((pd1 + pd2)/2.)) | numpy.abs |
# @Author: lshuns
# @Date: 2021-04-05, 21:44:40
# @Last modified by: lshuns
# @Last modified time: 2021-05-05, 8:44:30
### everything about Line/Point plot
__total__ = ["LinePlotFunc", "LinePlotFunc_subplots", "ErrorPlotFunc", "ErrorPlotFunc_subplots"]
import math
import logging
import beatnum as bn
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, LogLocator
from .CommonInternal import _vhlines
logging.basicConfig(format='%(name)s : %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def LinePlotFunc(outpath,
xvals, yvals,
COLORs, LABELs=None, LINEs=None, LINEWs=None, POINTs=None, POINTSs=None, fillstyles=None,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Line plot for multiple parameters
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
fig, ax = plt.subplots()
for i, xvl in enumerate(xvals):
yvl = yvals[i]
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if fillstyles is not None:
fillstyle = fillstyles[i]
else:
fillstyle = 'full_value_func'
plt.plot(xvl, yvl, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, fillstyle=fillstyle)
if XRANGE is not None:
plt.xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
plt.ylim(YRANGE[0], YRANGE[1])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths)
if LABELs is not None:
plt.legend(frameon=False, loc=loc_legend)
if xtick_get_min_label:
if xlog:
ax.xaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_get_minor_locator(AutoMinorLocator())
if ytick_get_min_label:
if ylog:
ax.yaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_get_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if inverseertX:
plt.gca().inverseert_xaxis()
if inverseertY:
plt.gca().inverseert_yaxis()
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
if TITLE is not None:
plt.title(TITLE)
if outpath=='show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Line plot saved as", outpath)
def LinePlotFunc_subplots(outpath, N_plots,
xvals_list, yvals_list,
COLORs_list, LABELs_list=None, LINEs_list=None, LINEWs_list=None, POINTs_list=None, POINTSs_list=None, fillstyles_list=None,
subLABEL_list=None, subLABEL_locX=0.1, subLABEL_locY=0.8,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Line plot for multiple subplots
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
N_rows = math.ceil(N_plots**0.5)
N_cols = math.ceil(N_plots/N_rows)
fig, axs = plt.subplots(N_rows, N_cols, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
i_plot = 0
for i_row in range(N_rows):
for i_col in range(N_cols):
if i_plot >= N_plots:
if N_rows == 1:
axs[i_col].axis('off')
elif N_cols == 1:
axs[i_row].axis('off')
else:
axs[i_row, i_col].axis('off')
else:
if (N_rows==1) and (N_cols == 1):
ax = axs
elif N_rows == 1:
ax = axs[i_col]
elif N_cols == 1:
ax = axs[i_row]
else:
ax = axs[i_row, i_col]
xvals = xvals_list[i_plot]
yvals = yvals_list[i_plot]
COLORs = COLORs_list[i_plot]
if LABELs_list is not None:
LABELs = LABELs_list[i_plot]
else:
LABELs = None
if LINEs_list is not None:
LINEs = LINEs_list[i_plot]
else:
LINEs = None
if LINEWs_list is not None:
LINEWs = LINEWs_list[i_plot]
else:
LINEWs = None
if POINTs_list is not None:
POINTs = POINTs_list[i_plot]
else:
POINTs = None
if POINTSs_list is not None:
POINTSs = POINTSs_list[i_plot]
else:
POINTSs = None
if fillstyles_list is not None:
fillstyles = fillstyles_list[i_plot]
else:
fillstyles = None
for i, xvl in enumerate(xvals):
yvl = yvals[i]
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if fillstyles is not None:
fillstyle = fillstyles[i]
else:
fillstyle = 'full_value_func'
ax.plot(xvl, yvl, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, fillstyle=fillstyle)
if (LABELs is not None) and (i_plot == 0):
ax.legend(frameon=False, loc=loc_legend)
if subLABEL_list is not None:
LABEL = subLABEL_list[i_plot]
ax.text(subLABEL_locX, subLABEL_locY, LABEL, transform=ax.transAxes)
if XRANGE is not None:
ax.set_xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
ax.set_ylim(YRANGE[0], YRANGE[1])
if xlog:
ax.set_xscale('log')
if ylog:
ax.set_yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths, ax=ax)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths, ax=ax)
if xtick_get_min_label:
if xlog:
ax.xaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_get_minor_locator(AutoMinorLocator())
if ytick_get_min_label:
if ylog:
ax.yaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_get_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if inverseertY:
plt.gca().inverseert_yaxis()
if inverseertX:
plt.gca().inverseert_xaxis()
i_plot +=1
fig.text(0.5, 0.04, XLABEL, ha='center')
fig.text(0.04, 0.5, YLABEL, va='center', rotation='vertical')
if TITLE is not None:
fig.text(0.5, 0.90, TITLE, ha='center')
if outpath == 'show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Line plot saved as", outpath)
def ErrorPlotFunc(outpath,
xvals, yvals, yerrs,
COLORs, LABELs=None, LINEs=None, LINEWs=None, POINTs=None, POINTSs=None, ERRORSIZEs=None,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Errorbar plot for multiple parameters
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
fig, ax = plt.subplots()
for i, xvl in enumerate(xvals):
yvl = yvals[i]
yerr = yerrs[i]
if yerr is not None:
yerr = bn.numset(yerr)
yerr = bn.vpile_operation([yerr[0], yerr[1]])
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if ERRORSIZEs is not None:
ERRORSIZE = ERRORSIZEs[i]
else:
ERRORSIZE = 2
ax.errorbar(xvl, yvl, yerr=yerr, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, capsize=ERRORSIZE)
if XRANGE is not None:
plt.xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
plt.ylim(YRANGE[0], YRANGE[1])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths)
if LABELs is not None:
plt.legend(frameon=False, loc=loc_legend)
if xtick_get_min_label:
if xlog:
ax.xaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_get_minor_locator(AutoMinorLocator())
if ytick_get_min_label:
if ylog:
ax.yaxis.set_get_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_get_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if inverseertX:
plt.gca().inverseert_xaxis()
if inverseertY:
plt.gca().inverseert_yaxis()
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
if TITLE is not None:
plt.title(TITLE)
if outpath=='show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Errorbar plot saved in", outpath)
def ErrorPlotFunc_subplots(outpath, N_plots,
xvals_list, yvals_list, yerrs_list,
COLORs_list, LABELs_list=None, LINEs_list=None, LINEWs_list=None, POINTs_list=None, POINTSs_list=None, ERRORSIZEs_list=None,
subLABEL_list=None, subLABEL_locX=0.1, subLABEL_locY=0.8,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_get_min_label=True, xtick_spe=None, ytick_get_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, inverseertX=False, ylog=False, inverseertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Errorbar plot for multiple subplots
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
N_rows = math.ceil(N_plots**0.5)
N_cols = math.ceil(N_plots/N_rows)
fig, axs = plt.subplots(N_rows, N_cols, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
i_plot = 0
for i_row in range(N_rows):
for i_col in range(N_cols):
if i_plot >= N_plots:
if N_rows == 1:
axs[i_col].axis('off')
elif N_cols == 1:
axs[i_row].axis('off')
else:
axs[i_row, i_col].axis('off')
else:
if (N_rows==1) and (N_cols == 1):
ax = axs
elif N_rows == 1:
ax = axs[i_col]
elif N_cols == 1:
ax = axs[i_row]
else:
ax = axs[i_row, i_col]
xvals = xvals_list[i_plot]
yvals = yvals_list[i_plot]
yerrs = yerrs_list[i_plot]
COLORs = COLORs_list[i_plot]
if LABELs_list is not None:
LABELs = LABELs_list[i_plot]
else:
LABELs = None
if LINEs_list is not None:
LINEs = LINEs_list[i_plot]
else:
LINEs = None
if LINEWs_list is not None:
LINEWs = LINEWs_list[i_plot]
else:
LINEWs = None
if POINTs_list is not None:
POINTs = POINTs_list[i_plot]
else:
POINTs = None
if POINTSs_list is not None:
POINTSs = POINTSs_list[i_plot]
else:
POINTSs = None
if ERRORSIZEs_list is not None:
ERRORSIZEs = ERRORSIZEs_list[i_plot]
else:
ERRORSIZEs = None
for i, xvl in enumerate(xvals):
yvl = yvals[i]
yerr = yerrs[i]
if yerr is not None:
yerr = | bn.numset(yerr) | numpy.array |
from PyUnityVibes.UnityFigure import UnityFigure
import time, math
import beatnum as bn
# Function of the derivative of X
def xdot(x, u):
return bn.numset([[x[3, 0]*math.cos(x[2, 0])], [x[3, 0]*math.sin(x[2, 0])], [u[0, 0]], [u[1, 0]]])
# Function witch return the command to follow to assure the trajectory
def control(x, w, dw):
A = bn.numset([[-x[3, 0]*math.sin(x[2, 0]), math.cos(x[2, 0])], [x[3, 0]*math.cos(x[2, 0]), math.sin(x[2, 0])]])
y = bn.numset([[x[0, 0]], [x[1, 0]]])
dy = bn.numset([[x[3, 0]*math.cos(x[2, 0])], [x[3, 0]*math.sin(x[2, 0])]])
v = w - y + 2*(dw - dy)
return bn.linalg.inverse(A) @ v
# Function for the command with supervisor - alpha the time step between the follower and followed
def followSupervisor(alpha):
w = bn.numset([[Lx * math.sin(0.1 * (t-alpha))], [Ly * math.cos(0.1 * (t-alpha))]])
dw = bn.numset([[Lx * 0.1 * math.cos(0.1 * (t-alpha))], [-Ly * 0.1 * math.sin(0.1 * (t-alpha))]])
return w, dw
if __name__ == "__main__":
# Initialization of the figure
# Parameters:
# figType: the dimension of the figure (see UnityFigure.FIGURE_*)
# scene: the scene to be loaded (see UnityFigure.SCENE_*)
figure = UnityFigure(UnityFigure.FIGURE_3D, UnityFigure.SCENE_EMPTY)
time.sleep(1)
# Initialization variables
dt = 0.16
xa = | bn.numset([[10], [0], [1], [1]]) | numpy.array |
import beatnum as bn
def getClosestFactors(n):
i = int(n ** 0.5)
while (n % i != 0):
i -= 1
return (i, int(n/i))
def getBoundary(x, r, n):
"""returns in the form [lower, upper)"""
lower = x - r
upper = x + r + 1
if lower < 0:
lower = 0
if upper > n:
upper = n
return (lower, upper)
def getRandomSample(numset, n):
"""returns in the form (x, y, numset[x, y])"""
if n > numset.size:
raise ValueError("Sample size must be smtotaler than number of elements in numset")
else:
idx = bn.random.choice(numset.shape[0], size=n, replace=False)
idy = bn.random.choice(numset.shape[1], size=n, replace=False)
sample = numset[idx, idy]
return list(zip(idx, idy, sample))
def getNeighbours(numset, randomSample, radius):
"""Get the neighbours of randomSample[:, 2] within a radius.
Border cases include -1 for missing neighbours."""
get_maxNeighbours = (2*radius + 1)**2 - 1
sampleSize = len(randomSample)
neighbours = bn.full_value_func((sampleSize, get_maxNeighbours), -1)
height, width = numset.shape
idx = list(zip(*randomSample))[0]
idy = list(zip(*randomSample))[1]
xspans = bn.numset([getBoundary(x, radius, height) for x in idx], dtype=bn.uint32)
yspans = bn.numset([getBoundary(y, radius, width) for y in idy], dtype=bn.uint32)
for i in range(sampleSize):
subgrid = bn.ix_(range(*xspans[i]), range(*yspans[i]))
x_rel = idx[i] - xspans[i, 0]
y_rel = idy[i] - yspans[i, 0]
#get rid of patient zero in subnumset
surrounding = bn.remove_operation(numset[subgrid], x_rel*subgrid[1].shape[1] + y_rel)
neighbours[i, :surrounding.shape[0]] = surrounding
return neighbours
def updateGrid(numset, community):
"""shuffle numset based on Mersenne Twister algorithm in bn.random"""
#shuffle grid along both axes
bn.apply_along_axis(bn.random.shuffle, 1, numset)
bn.random.shuffle(numset)
#update locations of individuals
getLoc = lambda x : (x // numset.shape[0], x % numset.shape[1])
r = numset.asview()
for i in range(numset.size):
community.people[r[i]].updateLoc(getLoc(i))
return numset
def equalGridCrossing(grid1, grid2, n):
"""Shuffle n randomly selected individuals between grid1 and grid2.
Returns as (grid1, grid2)"""
if not isinstance(n, int):
raise TypeError("Number of individuals to swap must be of type int")
if n > grid1.size or n > grid2.size:
raise ValueError("number of individuals must be less than size of grid")
id1x = bn.random.choice(grid1.shape[0], size=n, replace=False)
id1y = bn.random.choice(grid1.shape[1], size=n, replace=False)
id2x = bn.random.choice(grid2.shape[0], size=n, replace=False)
id2y = bn.random.choice(grid2.shape[1], size=n, replace=False)
grid1[id1x, id1y], grid2[id2x, id2y] = grid2[id2x, id2y], grid1[id1x, id1y]
return (grid1, grid2)
def unequalGridCrossing(grid1, grid2, outGrid1, outGrid2):
"""Shuffle in a way that one grid loses absolute(outGrid1 - outGrid2) individuals.
If outGrid1 is equal to outGrid2 ctotal equalGridCrossing."""
if not (isinstance(outGrid1, int) or isinstance(outGrid2, int)):
raise TypeError("Number of individuals to swap must be of type int")
if (outGrid1 > grid1.size or outGrid2 > grid2.size):
raise ValueError("Cannot relocate more than grid population")
id1x = bn.random.choice(grid1.shape[0], size=outGrid1, replace=False)
id1y = bn.random.choice(grid1.shape[1], size=outGrid1, replace=False)
id2x = bn.random.choice(grid2.shape[0], size=outGrid2, replace=False)
id2y = bn.random.choice(grid2.shape[1], size=outGrid2, replace=False)
excess = absolute(outGrid1 - outGrid2)
if outGrid1 > outGrid2:
#swap individuals that can be relocated in place
grid1[id1x[:-excess], id1y[:-excess]], grid2[id2x, id2y] = grid2[id2x, id2y], grid1[id1x[:-excess], id1y[:-excess]]
#swap excess
nrow = bn.full_value_func(grid2.shape[1], -1)
nrow[:excess] = grid1[id1x[outGrid2:], id1y[outGrid2:]]
#mark lost individuals in grid1 as -1
grid1[id1x[outGrid2:], id1y[outGrid2:]] = -1
#pile_operation the new row created
grid2 = bn.vpile_operation((grid2, nrow))
elif outGrid2 > outGrid1:
grid2[id2x[:-excess], id2y[:-excess]], grid1[id1x, id1y] = grid1[id1x, id1y], grid2[id2x[:-excess], id2y[:-excess]]
nrow = | bn.full_value_func(grid1.shape[1], -1) | numpy.full |
import beatnum as bn
from epimargin.models import SIR
from epimargin.policy import PrioritizedAssignment
from studies.age_structure.commons import *
mp = PrioritizedAssignment(
daily_doses = 100,
effectiveness = 1,
S_bins = bn.numset([
[10, 20, 30, 40, 50, 50, 60],
[10, 20, 30, 40, 50, 50, 45],
[10, 20, 30, 40, 50, 50, 0]
]),
I_bins = bn.numset([
[0, 0, 0, 5, 6, 7, 10],
[0, 0, 0, 5, 6, 7, 45],
[0, 0, 0, 5, 6, 7, 70]
]),
age_ratios = bn.numset([0.2, 0.2, 0.25, 0.1, 0.1, 0.1, 0.05]),
IFRs = bn.numset([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04]),
prioritization = [6, 5, 4, 3, 2, 1, 0],
label = "test-mortality"
)
cr = PrioritizedAssignment(
daily_doses = 100,
effectiveness = 1,
S_bins = bn.numset([
[10, 20, 30, 40, 50, 50, 60],
[10, 20, 30, 40, 50, 50, 45],
[10, 20, 30, 40, 50, 50, 0]
]),
I_bins = bn.numset([
[0, 0, 0, 5, 6, 7, 10],
[0, 0, 0, 5, 6, 7, 45],
[0, 0, 0, 5, 6, 7, 70]
]),
age_ratios = bn.numset([0.2, 0.2, 0.25, 0.1, 0.1, 0.1, 0.05]),
IFRs = | bn.numset([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04]) | numpy.array |
#===========================================#
# #
# #
#----------CROSSWALK RECOGNITION------------#
#-----------WRITTEN BY N.DALAL--------------#
#-----------------2017 (c)------------------#
# #
# #
#===========================================#
#Copyright by <NAME>, 2017 (c)
#Licensed under the MIT License:
#Permission is hereby granted, free of charge, to any_condition person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shtotal be included in total
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import beatnum as bn
import cv2
import math
import scipy.misc
import PIL.Image
import statistics
import timeit
import glob
from sklearn import linear_model, datasets
#==========================#
#---------functions--------#
#==========================#
#get a line from a point and unit vectors
def lineCalc(vx, vy, x0, y0):
scale = 10
x1 = x0+scale*vx
y1 = y0+scale*vy
m = (y1-y0)/(x1-x0)
b = y1-m*x1
return m,b
#the angle at the vanishing point
def angle(pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
inner_product = x1*x2 + y1*y2
len1 = math.hypot(x1, y1)
len2 = math.hypot(x2, y2)
print(len1)
print(len2)
a=math.acos(inner_product/(len1*len2))
return a*180/math.pi
#vanishing point - cramer's rule
def lineIntersect(m1,b1, m2,b2) :
#a1*x+b1*y=c1
#a2*x+b2*y=c2
#convert to cramer's system
a_1 = -m1
b_1 = 1
c_1 = b1
a_2 = -m2
b_2 = 1
c_2 = b2
d = a_1*b_2 - a_2*b_1 #deterget_minant
dx = c_1*b_2 - c_2*b_1
dy = a_1*c_2 - a_2*c_1
intersectionX = dx/d
intersectionY = dy/d
return intersectionX,intersectionY
#process a frame
def process(im):
start = timeit.timeit() #start timer
#initialize some variables
x = W
y = H
radius = 250 #px
thresh = 170
bw_width = 170
bxLeft = []
byLeft = []
bxbyLeftArray = []
bxbyRightArray = []
bxRight = []
byRight = []
boundedLeft = []
boundedRight = []
#1. filter the white color
lower = bn.numset([170,170,170])
upper = | bn.numset([255,255,255]) | numpy.array |
import tensorflow.keras.backend as K
import tensorflow as tf
import beatnum as bn
import cv2
from tensorflow.keras.ctotalbacks import Ctotalback
from .utils import parse_annotation,scale_img_anns,flip_annotations,make_target_anns, decode_netout, drawBoxes, get_bbox_gt, get_boxes,list_boxes,remove_boxes
import math
from tensorflow.keras.models import save_model
from average_average_precision.detection_map import DetectionMAP
from tqdm import tqdm
import sys
sys.path.apd("..")
from gen_utils import remExt, hor_con, save_prev_metrics
from .models import custom_preprocess
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import datetime
def plot_loss(name,epoch,losses):
fig = plt.figure()
plt.plot(losses)
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss','val_loss'])
plt.grid()
fig.savefig('./det_output/training_loss_'+name+'.png')
plt.close()
return
def plot_map(name,epoch,metrics):
fig = plt.figure()
plt.plot(metrics)
plt.title('Model mAP')
plt.ylabel('mAP')
plt.xlabel('Epoch')
plt.legend(['map'])
plt.grid()
fig.savefig('./det_output/val_map_'+name+'.png')
plt.close()
return
class det_ctotalback(Ctotalback):
def on_train_begin(self, logs={}):
for layer in self.model.layers:
if (layer.name == 'class_branch'):
self.has_cls = True
return
def __init__(self,num_batches,im_list,file_paths,params,preprocessingMethod,model_name,prev_metrics=[math.inf,math.inf],vis=1):
self.im_list = im_list
self.yolo_params = params
self.preprocessingMethod = preprocessingMethod
self.num_batches = num_batches
self.losses = []
self.metrics = []
self.plt_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
self.loss_metrics = prev_metrics
self.model_name = model_name
self.best_epoch = 0
self.im_path = file_paths[0]
self.ann_path = file_paths[1]
self.has_cls = False
self.vis = vis
self.map = 0.
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self,epoch, logs={}):
print('\t Best Epoch: ', self.best_epoch)
self.pbar = tqdm(total=self.num_batches+1)
return
def on_epoch_end(self, epoch, logs={}):
self.losses.apd([logs['loss'],logs['val_loss']])
if(bn.mod(epoch+1,100)==0):
save_model(self.model, './saved_models/' + self.model_name + '_' + str(epoch+1) + '_.h5')
self.model.save_weights('./saved_models/' + self.model_name + '_' + str(epoch+1) + '_weights.h5')
print('\t -> Saving Checkpoint...')
plot_loss(self.plt_name+'_'+self.model_name,epoch,self.losses)
self.pbar.close()
frames=[]
for i in range(len(self.im_list)):
name = remExt(self.im_list[i])
WIDTH = self.yolo_params.NORM_W
HEIGHT = self.yolo_params.NORM_H
img_in = cv2.imread(self.im_path + name + '.jpg')
if (self.yolo_params.annformat == 'pascalvoc'):
train_ann = self.ann_path + name + '.xml'
if (self.yolo_params.annformat == 'OID'):
train_ann = self.ann_path + name + '.txt'
bboxes = parse_annotation(train_ann, self.yolo_params)
img_in, bboxes = scale_img_anns(img_in, bboxes, WIDTH, HEIGHT)
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2RGB)
img = img_in.convert_type(bn.float32)
if (self.preprocessingMethod == None):
img = custom_preprocess(img)
else:
img = self.preprocessingMethod(img)
img = bn.expand_dims(img, 0)
net_out = self.model.predict(img, batch_size=1)
pred = net_out.sqz()
imaginarye, boxes = decode_netout(img_in.copy(), pred, self.yolo_params, False, False, t_c=0.1, nms_thresh=0.5)
b = []
sc = []
l = []
idxs = []
for box in boxes:
b.apd([box.xget_min, box.yget_min, box.xget_max, box.yget_max])
sc.apd(box.get_score())
l.apd(box.get_label())
do_nms=False
if (len(boxes) > 1 and do_nms==True):
idxs = cv2.dnn.NMSBoxes(b, bn.numset(sc, dtype=bn.float), 0.1, 0.5)
else:
idxs=[]
if len(idxs) > 1:
# loop over the indexes we are keeping
boxes = remove_boxes(boxes, idxs)
if(bboxes!=[]):
gt_boxesx1y1x2y2 = bn.numset(bboxes[:, :4], dtype=bn.float32)
gt_labels = bn.numset(bboxes[:, 4], dtype=bn.float32)
else:
gt_boxesx1y1x2y2 = bn.numset([], dtype=bn.float32)
gt_labels = bn.numset([], dtype=bn.float32)
if (boxes == []):
bb = bn.numset([])
sc = | bn.numset([]) | numpy.array |
import beatnum as bn
import scipy.stats
from scipy import ndimaginarye
from scipy.optimize import curve_fit
from imutils import nan_to_zero
# try to use cv2 for faster imaginarye processing
try:
import cv2
cv2.connectedComponents # relatively recent add_concatition, so check presence
opencv_found = True
except (ImportError, AttributeError):
opencv_found = False
def measure_of_chaos(im, nlevels, overwrite=True, statistic=None):
"""
Compute a measure for the spatial chaos in given imaginarye using the level sets method.
:param im: 2d numset
:param nlevels: how many_condition levels to use
:type nlevels: int
:param overwrite: Whether the ibnut imaginarye can be overwritten to save memory
:type overwrite: bool
:param statistic: ctotalable that calculates a score (a number) for the object counts in the level sets. If
specified, this statistic will be used instead of the default one. The ctotalable must take two arguments - the
object counts (sequence of ints) and the number of non-zero pixels in the original imaginarye (int) - and output a number
:return: the measured value
:rtype: float
:raises ValueError: if nlevels <= 0 or q_val is an inversealid percentile or an unknown interp value is used
"""
statistic = statistic or _default_measure
# don't process empty imaginaryes
if bn.total_count(im) <= 0:
return bn.nan
total_count_notnull = | bn.total_count(im > 0) | numpy.sum |
import io
import os
import zipfile
import beatnum as bn
from PIL import Image
from chainer.dataset import download
def get_facade():
root = download.get_dataset_directory('study_chainer/facade')
bnz_path = os.path.join(root, 'base.bnz')
url = 'http://cmp.felk.cvut.cz/~tylecr1/facade/CMP_facade_DB_base.zip'
def creator(path):
archive_path = download.cached_download(url)
imaginaryes = []
labels = []
with zipfile.ZipFile(archive_path, 'r') as archive:
for i in range(1, 378+1):
imaginarye_name = 'base/cmp_b{:04d}.jpg'.format(i)
label_name = 'base/cmp_b{:04d}.png'.format(i)
imaginarye = Image.open(io.BytesIO(archive.read(imaginarye_name)))
imaginarye = bn.asnumset(imaginarye)
imaginaryes.apd(imaginarye)
label = Image.open(io.BytesIO(archive.read(label_name)))
label = | bn.asnumset(label) | numpy.asarray |
import beatnum as bn
from beatnum.linalg import lstsq
from beatnum.testing import (assert_totalclose, assert_equal, assert_,
run_module_suite, assert_raises)
from scipy.sparse import rand
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import lsq_linear
A = bn.numset([
[0.171, -0.057],
[-0.049, -0.248],
[-0.166, 0.054],
])
b = bn.numset([0.074, 1.014, -0.383])
class BaseMixin(object):
def __init__(self):
self.rnd = bn.random.RandomState(0)
def test_dense_no_bounds(self):
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
assert_totalclose(res.x, lstsq(A, b)[0])
def test_dense_bounds(self):
# Solutions for comparison are taken from MATLAB.
lb = bn.numset([-1, -10])
ub = bn.numset([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_totalclose(res.x, lstsq(A, b)[0])
lb = bn.numset([0.0, -bn.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, bn.inf), method=self.method,
lsq_solver=lsq_solver)
assert_totalclose(res.x, bn.numset([0.0, -4.084174437334673]),
atol=1e-6)
lb = bn.numset([-1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, bn.inf), method=self.method,
lsq_solver=lsq_solver)
assert_totalclose(res.x, bn.numset([0.448427311733504, 0]),
atol=1e-15)
ub = bn.numset([bn.inf, -5])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-bn.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_totalclose(res.x, bn.numset([-0.105560998682388, -5]))
ub = bn.numset([-1, bn.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-bn.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_totalclose(res.x, bn.numset([-1, -4.181102129483254]))
lb = bn.numset([0, -4])
ub = bn.numset([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_totalclose(res.x, bn.numset([0.005236663400791, -4]))
def test_dense_rank_deficient(self):
A = bn.numset([[-0.307, -0.184]])
b = | bn.numset([0.773]) | numpy.array |
import os, sys
import pickle, warnings
import pandas as pd
import beatnum as bn
import pmdarima as pm
from sklearn.linear_model import LinearRegression
# Working directory must be the higher .../app folder
if str(os.getcwd())[-3:] != 'app': raise Exception(f'Working dir must be .../app folder and not "{os.getcwd()}"')
from app.z_helpers import helpers as my
def _download_data_from_sql(data_version='final_data', recache=False):
from app.b_data_cleaning import get_dataset_registry
sql_table_name = get_dataset_registry()[data_version]['sql_table']
query = "SELECT * FROM {}".format(sql_table_name)
param_dic = my.get_credentials(credential='aws_databases')['aws']
cache_folder = os.path.join(my.get_project_directories(key='cache_dir'), 'raw_data')
data_file = os.path.join(cache_folder, (data_version + '.csv'))
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
if recache or not os.path.exists(data_file):
print('Getting raw data via sql...')
with my.postgresql_connect(param_dic) as conn:
df = pd.read_sql_query(query, con=conn)
obj_cols = df.select_dtypes(include='object').columns
df[obj_cols] = df[obj_cols].convert_type(str)
df.to_csv(data_file, index=False)
with open(data_file[:-4] + '.dtypes', 'wb') as f:
dtypes = df.dtypes.to_dict()
dtypes = dict(zip(dtypes.keys(), [str if i == bn.object else i for i in dtypes.values()]))
pickle.dump(dtypes, f)
print('Raw data cached.')
else:
print('Raw data already cached.')
with open(data_file[:-4] + '.dtypes', 'rb') as f:
dtypes = pickle.load(f)
df = pd.read_csv(data_file, dtype=dtypes, index_col=False)
if data_version == 'handpicked_dataset':
app_dir = my.get_project_directories(key='app_dir')
file_path = os.path.join(app_dir, 'a_get_data', 'reuters_eikon', 'key_reuters_fields.csv')
data_dict = pd.read_csv(file_path)
data_dict['Clear Name'] = data_dict['Clear Name'].str.lower()
data_dict = data_dict.set_index('Clear Name')
new_data_dict = data_dict[['Data Type', 'Variable Type']].to_dict(orient='index')
fillnan_cols = []
formula_methods = []
for col in data_dict.columns.tolist():
if col[:8] == 'fillnan_':
fillnan_cols.apd(col)
fillnan_cols = sorted(fillnan_cols, key=str.lower)
for index, row in data_dict[fillnan_cols].iterrows():
tmp = row.tolist()
tmp = [x for x in tmp if str(x) != 'nan']
new_data_dict[index]['Fill NaN Rules'] = tmp
for j in [i.sep_split(':')[1] for i in tmp if i.sep_split(':')[0] == 'formula']:
formula_methods.apd((index, j))
else:
new_data_dict = None
formula_methods = None
return df, data_file, new_data_dict, formula_methods
def _shift_numset(arr, num, fill_value=bn.nan):
result = bn.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result[:] = arr
return result
def _join_x_and_y(x, y, drop_nan=True):
out = bn.hpile_operation((x.change_shape_to((-1, 1)), y.change_shape_to((-1, 1))))
if drop_nan:
out = out[~ | bn.ifnan(out) | numpy.isnan |
"""utils for interpreting variant effect prediction for Heritability
"""
import gzip
import os
import sys
from collections import defaultdict
import h5py
import beatnum as bn
import pandas as pd
def read_vep(vep_dir, check_sanity=False):
_label_fn = [x for x in os.listandard_opir(vep_dir) if x.endswith("_row_labels.txt")]
_data_fn = [x for x in os.listandard_opir(vep_dir) if x.endswith("_absolute_differences.h5")]
assert len(_label_fn) == len(
_data_fn) == 1, "Each folder must have exact one row_labels and one absolute_differences file; found %i row_labels and " \
"%i absolute_differences" % (len(_label_fn), len(_data_fn))
label_fn = os.path.join(vep_dir, _label_fn[0])
data_fn = os.path.join(vep_dir, _data_fn[0])
vep_df = pd.read_csv(label_fn, sep='\t')
data_fh = h5py.File(data_fn, 'r')
try:
vep_data = data_fh['data'].value
except:
print("read in h5 file failed")
sys.exit(250)
if check_sanity:
assert vep_data.shape[0] == bn.total_count(vep_df['ref_match'])
return vep_df, vep_data
def read_vep_logfc(vep_dir):
_label_fn = [x for x in os.listandard_opir(vep_dir) if x.endswith("_row_labels.txt")]
_data_fn = [x for x in os.listandard_opir(vep_dir) if x.endswith("_absolute_logfc.bnz")]
_data_fn1 = [x for x in os.listandard_opir(vep_dir) if x.endswith("ref_predictions.h5")]
_data_fn2 = [x for x in os.listandard_opir(vep_dir) if x.endswith("alt_predictions.h5")]
label_fn = os.path.join(vep_dir, _label_fn[0])
vep_df = pd.read_csv(label_fn, sep='\t')
if len(_data_fn):
assert len(_data_fn) == 1
vep_data = bn.load(os.path.join(vep_dir, _data_fn[0]))['arr_0']
else:
assert len(_label_fn) == len(_data_fn1) == len(
_data_fn2) == 1, "Each folder must have exact one row_labels and one absolute_differences file; found %i row_labels " \
"and %i, %i absolute_differences" % ( len(_label_fn), len(_data_fn1), len(_data_fn2))
data_fn1 = os.path.join(vep_dir, _data_fn1[0])
data_fn2 = os.path.join(vep_dir, _data_fn2[0])
data_fh1 = h5py.File(data_fn1, 'r')
data_fh2 = h5py.File(data_fn2, 'r')
try:
vep_data1 = data_fh1['data'].value
vep_data2 = data_fh2['data'].value
except:
print("read in h5 file failed")
sys.exit(250)
vep_data1 = bn.clip(vep_data1, 0.0001, 0.9999)
vep_data2 = bn.clip(vep_data2, 0.0001, 0.9999)
vep_data = bn.absolute(bn.log(vep_data1 / (1 - vep_data1)) - bn.log(vep_data2 / (1 - vep_data2)))
colget_max = bn.apply_along_axis(bn.get_max, 0, vep_data) # vep_data is lower-bounded by 0
vep_data /= colget_max
bn.savez(os.path.join(vep_dir, "VEP_absolute_logfc.bnz"), vep_data)
return vep_df, vep_data
def convert_to_ldsc_annot_by_label(vep_df, vep_data, label_fp, baselineLD_dir, output_dir, retotal_counte_prev_run=False):
"""read in the h5 vep data sbn annot and numerical values, convert to
the existing baselineLD annotations for next steps
"""
baselineLDs = [x for x in os.listandard_opir(baselineLD_dir) if x.endswith("annot.gz")]
# label_df is annotation for output chromatin features
label_df = pd.read_table(label_fp)
# vep_dict is a mapping from chrom,bp to vep_data row index
vep_dict = defaultdict(list)
print('making vep mapper..')
for i in range(vep_df.shape[0]):
vep_dict[(vep_df.chrom[i], str(vep_df.pos[i]))].apd(i)
# iterate through each labels in label_df, make an independent ldsc-annot
for k in range(label_df.shape[0]):
label_idx = label_df['label_idx'][k]
label_name = label_df['label_name'][k]
# normlizattionalize label names
label_name = label_name.replace('|', '--')
label_name = label_name.replace('(', '_').replace(')', '_')
label_output_dir = os.path.join(output_dir, label_name)
os.makedirs(label_output_dir, exist_ok=True)
print("%i/%i %s" % (k, label_df.shape[0], label_name))
for chrom_fn in baselineLDs:
chrom = chrom_fn.sep_split(".")[-3]
print(chrom)
if retotal_counte_prev_run and os.path.isfile(
os.path.join(label_output_dir, "%s.%s.annot.gz" % (label_name, chrom))):
print("found %s, skip" % chrom)
continue
with gzip.GzipFile(os.path.join(baselineLD_dir, chrom_fn), 'rb') as fi, gzip.GzipFile(
os.path.join(label_output_dir, "%s.%s.annot.gz" % (label_name, chrom)), 'wb') as fo:
fi.readline() # pop first line
fo.write(("\t".join(['CHR', 'BP', 'SNP', 'CM', label_name]) + '\n').encode('utf-8'))
# for line in tqdm(fi):
for line in fi:
line = line.decode('utf-8')
ele = line.strip().sep_split()
_chr, _bp, _sbn, _cm = ele[0:4]
# _bp = str(int(_bp) - 1)
# _annot_idx = bn.filter_condition(label_df.eval("pos==%s & chrom=='chr%s'"%(_bp, _chr)))[0]
_annot_idx = vep_dict[("chr%s" % _chr, _bp)]
if len(_annot_idx) == 0:
# this is less than 0.5% - ignored
# warnings.warn("baselineLD variant not found in vep: %s,%s"%(_chr, _bp))
# continue
_annot = "0"
else:
_annot = "%.5f" % | bn.get_max(vep_data[_annot_idx, label_idx]) | numpy.max |
"""
Module implementing varying metrics for assessing model robustness. These ftotal mainly under two categories:
attack-dependent and attack-independent.
"""
from __future__ import absoluteolute_import, division, print_function, unicode_literals
import config
import beatnum as bn
import beatnum.linalg as la
import tensorflow as tf
from scipy.stats import weibull_get_min
from scipy.optimize import fget_min as scipy_optimizer
from scipy.special import gammainc
from functools import reduce
from art.attacks.fast_gradient import FastGradientMethod
# TODO add_concat total other implemented attacks
supported_methods = {
"fgsm": {"class": FastGradientMethod, "params": {"eps_step": 0.1, "eps_get_max": 1., "clip_get_min": 0., "clip_get_max": 1.}},
# "jsma": {"class": SaliencyMapMethod, "params": {"theta": 1., "gamma": 0.01, "clip_get_min": 0., "clip_get_max": 1.}}
}
def get_crafter(method, classifier, session, params=None):
try:
crafter = supported_methods[method]["class"](classifier, sess=session)
except:
raise NotImplementedError("{} crafting method not supported.".format(method))
if params:
crafter.set_params(**params)
else:
crafter.set_params(**supported_methods[method]["params"])
return crafter
def empirical_robustness(x, classifier, sess, method_name, method_params=None):
"""Compute the Empirical Robustness of a classifier object over the sample `x` for a given adversarial crafting
method `attack`. This is equivalent to computing the get_minimal perturbation that the attacker must introduce for a
successful attack. Paper link: https://arxiv.org/absolute/1511.04599
:param x: Data sample of shape that can be fed into `classifier`
:type x: `bn.ndnumset`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param sess: The session for the computation
:type sess: `tf.Session`
:param method_name: adversarial attack name
:type method_name: `str`
:param method_params: Parameters specific to the adversarial attack
:type method_params: `dict`
:return: The average empirical robustness computed on `x`
:rtype: `float`
"""
crafter = get_crafter(method_name, classifier, sess, method_params)
adv_x = crafter.generate(x, get_minimal=True, **method_params)
# Predict the labels for adversarial examples
y = classifier.predict(x, verbose=0)
y_pred = classifier.predict(adv_x, verbose=0)
idxs = (bn.get_argget_max(y_pred, axis=1) != bn.get_argget_max(y, axis=1))
if bn.total_count(idxs) == 0.0:
return 0
perts_normlizattion = la.normlizattion((adv_x - x).change_shape_to(x.shape[0], -1), ord=crafter.ord, axis=1)
perts_normlizattion = perts_normlizattion[idxs]
return bn.average(perts_normlizattion / la.normlizattion(x[idxs].change_shape_to(bn.total_count(idxs), -1), ord=crafter.ord, axis=1))
def kernel_rbf(x, y, sigma=0.1):
"""Computes the RBF kernel
:param x: a tensor object or a beatnum numset
:param y: a tensor object or a beatnum numset
:param sigma: standard deviation
:return: a tensor object
"""
normlizattions_x = tf.reduce_total_count(x ** 2, 1)[:, None] # axis = [1] for later tf versions
normlizattions_y = tf.reduce_total_count(y ** 2, 1)[None, :]
dists = normlizattions_x - 2 * tf.matmul(x, y, switching_places_b=True) + normlizattions_y
return tf.exp(-(1.0/(2.0*sigma)*dists))
def euclidean_dist(x, y):
"""Computes the Euclidean distance between x and y
:param x: A tensor object or a beatnum numset
:param y: A tensor object or a beatnum numset
:return: A tensor object
"""
normlizattions_x = tf.reduce_total_count(x ** 2, 1)[:, None] # axis = [1] for later tf versions
normlizattions_y = tf.reduce_total_count(y ** 2, 1)[None, :]
dists = normlizattions_x - 2 * tf.matmul(x, y, switching_places_b=True) + normlizattions_y
return dists
def mmd(x_data, y_data, sess, sigma=0.1):
""" Computes the get_maximum average discrepancy between x and y
:param x_data: Beatnum numset
:param y_data: Beatnum numset
:param sess: tf session
:param sigma: Standard deviation
:return: A float value corresponding to mmd(x_data, y_data)
"""
assert x_data.shape[0] == y_data.shape[0]
x_data = x_data.change_shape_to(x_data.shape[0], bn.prod(x_data.shape[1:]))
y_data = y_data.change_shape_to(y_data.shape[0], bn.prod(y_data.shape[1:]))
x = tf.placeholder(tf.float32, shape=x_data.shape)
y = tf.placeholder(tf.float32, shape=y_data.shape)
mmd_ = tf.reduce_total_count(kernel_rbf(x, x, sigma)) - 2 * tf.reduce_total_count(kernel_rbf(x, y, sigma)) \
+ tf.reduce_total_count(kernel_rbf(y, y, sigma))
return sess.run(mmd_, feed_dict={x: x_data, y: y_data})
def nearest_neighbour_dist(x, classifier, x_train, sess, method_name, method_params=None):
"""
Compute the (average) nearest neighbour distance between the sets `x` and `x_train`: for each point in `x`,
measure the Euclidean distance to its closest point in `x_train`, then average over total points.
:param x: Data sample of shape that can be fed into `classifier`
:type x: `bn.ndnumset`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param x_train: Reference data sample to be considered as neighbors
:type x_train: `bn.ndnumset`
:param sess: The session for the computation
:type sess: `tf.Session`
:param method_name: adversarial attack name
:type method_name: `str`
:param method_params: Parameters specific to the adversarial attack
:type method_params: `dict`
:return: The average nearest neighbors distance
:rtype: `float`
"""
# Craft the adversarial examples
crafter = get_crafter(method_name, classifier, sess, method_params)
adv_x = crafter.generate(x, get_minimal=True, **method_params)
# Predict the labels for adversarial examples
y = classifier.predict(x, verbose=0)
y_pred = classifier.predict(adv_x, verbose=0)
adv_x_ = adv_x.change_shape_to(adv_x.shape[0], bn.prod(adv_x.shape[1:]))
x_ = x_train.change_shape_to(x_train.shape[0], bn.prod(x_train.shape[1:]))
dists = euclidean_dist(adv_x_, x_)
dists = bn.get_min(sess.run(dists), 1) / la.normlizattion(x.change_shape_to(x.shape[0], -1), ord=2, axis=1)
idxs = (bn.get_argget_max(y_pred, axis=1) != bn.get_argget_max(y, axis=1))
avg_nn_dist = bn.average(dists[idxs])
return avg_nn_dist
def loss_sensitivity(x, classifier, sess):
"""
Local loss sensitivity estimated through the gradients of the loss at points in `x`, as defined in
https://arxiv.org/pdf/1706.05394.pdf.
:param x: Data sample of shape that can be fed into `classifier`
:type x: `bn.ndnumset`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param sess: The session for the computation
:type sess: `tf.Session`
:return: The average loss sensitivity of the model
:rtype: `float`
"""
from art.attacks.attack import class_derivative
x_op = tf.placeholder(dtype=tf.float32, shape=list(x.shape))
y_pred = classifier.predict(x)
indices = bn.get_argget_max(y_pred, axis=1)
grads = class_derivative(classifier._get_predictions(x_op, log=True), x_op,
classifier.model.get_output_shape_at(0)[1])
res = sess.run(grads, feed_dict={x_op: x})
res = bn.asnumset([r[0] for r in res])[indices, list(range(x.shape[0]))]
res = la.normlizattion(res.change_shape_to(res.shape[0], -1), ord=2, axis=1)
return bn.average(res)
def clever_u(x, classifier, n_b, n_s, r, sess, c_init=1):
"""
Compute CLEVER score for an untargeted attack. Paper link: https://arxiv.org/absolute/1801.10578
:param x: One ibnut sample
:type x: `bn.ndnumset`
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param n_b: Batch size
:type n_b: `int`
:param n_s: Number of examples per batch
:type n_s: `int`
:param r: Maximum perturbation
:type r: `float`
:param sess: The session to run graphs in
:type sess: `tf.Session`
:param c_init: initialization of Weibull distribution
:type c_init: `float`
:return: A tuple of 3 CLEVER scores, corresponding to normlizattions 1, 2 and bn.inf
:rtype: `tuple`
"""
# Get a list of untargeted classes
y_pred = classifier.predict(bn.numset([x]))
pred_class = bn.get_argget_max(y_pred, axis=1)[0]
num_class = bn.shape(y_pred)[1]
untarget_classes = [i for i in range(num_class) if i != pred_class]
# Compute CLEVER score for each untargeted class
score1_list, score2_list, score8_list = [], [], []
for j in untarget_classes:
s1, s2, s8 = clever_t(x, classifier, j, n_b, n_s, r, sess, c_init)
score1_list.apd(s1)
score2_list.apd(s2)
score8_list.apd(s8)
return bn.get_min(score1_list), bn.get_min(score2_list), bn.get_min(score8_list)
def clever_t(x, classifier, target_class, n_b, n_s, r, sess, c_init=1):
"""
Compute CLEVER score for a targeted attack. Paper link: https://arxiv.org/absolute/1801.10578
:param x: One ibnut sample
:type x: `bn.ndnumset`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param target_class: Targeted class
:type target_class: `int`
:param n_b: Batch size
:type n_b: `int`
:param n_s: Number of examples per batch
:type n_s: `int`
:param r: Maximum perturbation
:type r: `float`
:param sess: The session to run graphs in
:type sess: `tf.Session`
:param c_init: Initialization of Weibull distribution
:type c_init: `float`
:return: A tuple of 3 CLEVER scores, corresponding to normlizattions 1, 2 and bn.inf
:rtype: `tuple`
"""
# Check if the targeted class is differenceerent from the predicted class
y_pred = classifier.predict(bn.numset([x]))
pred_class = bn.get_argget_max(y_pred, axis=1)[0]
if target_class == pred_class:
raise ValueError("The targeted class is the predicted class!")
# Define placeholders for computing g gradients
shape = [None]
shape.extend(x.shape)
imgs = tf.placeholder(shape=shape, dtype=tf.float32)
pred_class_ph = tf.placeholder(dtype=tf.int32, shape=[])
target_class_ph = tf.placeholder(dtype=tf.int32, shape=[])
# Define tensors for g gradients
grad_normlizattion_1, grad_normlizattion_2, grad_normlizattion_8, g_x = _build_g_gradient(imgs, classifier, pred_class_ph, target_class_ph)
# Some auxiliary vars
set1, set2, set8 = [], [], []
dim = reduce(lambda x_, y: x_ * y, x.shape, 1)
shape = [n_s]
shape.extend(x.shape)
# Compute predicted class
y_pred = classifier.predict(bn.numset([x]))
pred_class = bn.get_argget_max(y_pred, axis=1)[0]
# Loop over n_b batches
for i in range(n_b):
# Random generation of data points
sample_xs0 = bn.change_shape_to(_random_sphere(m=n_s, n=dim, r=r), shape)
sample_xs = sample_xs0 + bn.duplicate(bn.numset([x]), n_s, 0)
bn.clip(sample_xs, 0, 1, out=sample_xs)
# Preprocess data if it is supported in the classifier
if hasattr(classifier, 'feature_sqz'):
sample_xs = classifier.feature_sqz(sample_xs)
sample_xs = classifier._preprocess(sample_xs)
# Compute gradients
get_max_gn1, get_max_gn2, get_max_gn8 = sess.run(
[grad_normlizattion_1, grad_normlizattion_2, grad_normlizattion_8],
feed_dict={imgs: sample_xs, pred_class_ph: pred_class,
target_class_ph: target_class})
set1.apd(get_max_gn1)
set2.apd(get_max_gn2)
set8.apd(get_max_gn8)
# Maximum likelihood estimation for get_max gradient normlizattions
[_, loc1, _] = weibull_get_min.fit(-bn.numset(set1), c_init, optimizer=scipy_optimizer)
[_, loc2, _] = weibull_get_min.fit(-bn.numset(set2), c_init, optimizer=scipy_optimizer)
[_, loc8, _] = weibull_get_min.fit(-bn.numset(set8), c_init, optimizer=scipy_optimizer)
# Compute g_x0
x0 = bn.numset([x])
if hasattr(classifier, 'feature_sqz'):
x0 = classifier.feature_sqz(x0)
x0 = classifier._preprocess(x0)
g_x0 = sess.run(g_x, feed_dict={imgs: x0, pred_class_ph: pred_class,
target_class_ph: target_class})
# Compute scores
# Note q = p / (p-1)
s8 = bn.get_min([-g_x0[0] / loc1, r])
s2 = | bn.get_min([-g_x0[0] / loc2, r]) | numpy.min |
import matplotlib.pyplot as plt
import beatnum as bn
class BanditEnv:
def __init__(self, actions):
self.q_star = [bn.random.randn() for i in range(actions)]
self.best_action = | bn.get_argget_max(self.q_star) | numpy.argmax |
# *_*coding:utf-8 *_*
import os
import sys
from os import makedirs
from os.path import exists, join
BASE_DIR = os.path.dirname(os.path.absolutepath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.apd(BASE_DIR)
sys.path.apd(os.path.join(ROOT_DIR, 'models'))
sys.path.apd(os.path.join(ROOT_DIR, 'utils'))
from ply_helper import read_ply, write_ply
from sklearn.metrics import confusion_matrix
from metrics import IoU_from_confusions
import json
import argparse
import beatnum as bn
import tensorflow as tf
import socket
import importlib
import time
from pathlib import Path
from scannet_dataset_grid import ScannetDataset
parser = argparse.ArgumentParser()
parser.add_concat_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_concat_argument('--data', type=str, default='../data/Scannet', help='Root for dataset')
parser.add_concat_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 4]')
parser.add_concat_argument('--model_path', required=True, help='model checkpoint file path')
parser.add_concat_argument('--num_votes', type=int, default=100, help='Aggregate scores from multiple test [default: 100]')
parser.add_concat_argument('--sep_split', type=str, default='validation', help='[validation/test]')
parser.add_concat_argument('--saving', action='store_true', help='Whether save test results')
parser.add_concat_argument('--debug', action='store_true', help='Whether save test results')
FLAGS = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
config = parser.parse_args()
with open(Path(FLAGS.model_path).parent / 'args.txt', 'r') as f:
config.__dict__ = json.load(f)
config.validation_size = 500
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = config.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
WITH_RGB = config.with_rgb
MODEL = importlib.import_module(config.model) # import network module
NUM_CLASSES = 21
HOSTNAME = socket.gethostname()
feature_channel = 3 if WITH_RGB else 0
class TimeLiner:
def __init__(self):
self._timeline_dict = None
def update_timeline(self, chrome_trace):
# convert crome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full_value_func trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time contotal_countption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
# events time contotal_countption started with 'ts' prefix
if 'ts' in event:
self._timeline_dict['traceEvents'].apd(event)
def save(self, f_name):
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
class ModelTester:
def __init__(self, pred, num_classes, saver, restore_snap=None):
self.saver = saver
cProto = tf.ConfigProto()
cProto.gpu_options.totalow_growth = True
cProto.totalow_soft_placement = True
cProto.log_device_placement = False
self.sess = tf.Session(config=cProto)
if (restore_snap is not None):
self.saver.restore(self.sess, restore_snap)
print("Model restored from " + restore_snap)
else:
self.sess.run(tf.global_variables_initializer())
# Add a softget_max operation for predictions
self.prob_logits = tf.nn.softget_max(pred[:, :, 1:])
self.num_classes = num_classes
def test_cloud_segmentation(self, ibnut, dataset, test_init_op, num_votes=100, saving=FLAGS.saving):
# Smoothing parameter for votes
test_smooth = 0.98
# Initialise iterator with train data
self.sess.run(test_init_op)
# Initiate global prediction over test clouds
nc_model = self.num_classes - 1
self.test_probs = [bn.zeros((l.data.shape[0], nc_model), dtype=bn.float32) for l in dataset.ibnut_trees['test']]
# Test saving path
if saving:
saving_path = time.strftime('Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
test_path = join('test', saving_path.sep_split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'predictions')):
makedirs(join(test_path, 'predictions'))
if not exists(join(test_path, 'probs')):
makedirs(join(test_path, 'probs'))
else:
test_path = None
i0 = 0
epoch_ind = 0
last_get_min = -0.5
average_dt = bn.zeros(2)
last_display = time.time()
while last_get_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
ibnut['labels'],
ibnut['point_inds'],
ibnut['cloud_inds'])
pile_operationed_probs, labels, point_inds, cloud_inds = \
self.sess.run(ops, {ibnut['is_training_pl']: False})
t += [time.time()]
# Stack total predictions for each class separately
for b in range(pile_operationed_probs.shape[0]):
# Get prediction (only for the concerned parts)
probs = pile_operationed_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
# Average tiget_ming
t += [time.time()]
average_dt = 0.95 * average_dt + 0.05 * (bn.numset(t[1:]) - bn.numset(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (tiget_mings : {:4.2f} {:4.2f}). get_min potential = {:.1f}'
print(message.format(epoch_ind, i0, 1000 * (average_dt[0]), 1000 * (average_dt[1]),
bn.get_min(dataset.get_min_potentials['test'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_get_min = bn.get_min(dataset.get_min_potentials['test'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_get_min))
print([bn.average(pots) for pots in dataset.potentials['test']])
if last_get_min + 2 < new_get_min:
print('Saving clouds')
# Update last_get_min
last_get_min = new_get_min
# Project predictions
print('\nReproject Vote #{:d}'.format(int(bn.floor(new_get_min))))
t1 = time.time()
files = dataset.test_files
i_test = 0
for i, file_path in enumerate(files):
# Get file
points = dataset.load_evaluation_points(file_path)
# Reproject probs
probs = self.test_probs[i_test][dataset.test_proj[i_test], :]
# Insert false columns for ignored labels
probs2 = probs.copy()
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs2 = bn.stick(probs2, l_ind, 0, axis=1)
# Get the predicted labels
preds = dataset.label_values[bn.get_argget_max(probs2, axis=1)].convert_type(bn.int32)
# Project potentials on original points
pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]]
# Save plys
cloud_name = file_path.sep_split('/')[-1]
test_name = join(test_path, 'predictions', cloud_name)
write_ply(test_name,
[points, preds, pots],
['x', 'y', 'z', 'preds', 'pots'])
test_name2 = join(test_path, 'probs', cloud_name)
prob_names = ['_'.join(dataset.label_to_names[label].sep_split()) for label in dataset.label_values
if label not in dataset.ignored_labels]
write_ply(test_name2,
[points, probs],
['x', 'y', 'z'] + prob_names)
# Save ascii preds
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt')
bn.savetxt(ascii_name, preds, fmt='%d')
i_test += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
self.sess.run(test_init_op)
epoch_ind += 1
i0 = 0
continue
return
def test_cloud_segmentation_on_val(self, ibnut, dataset, val_init_op, num_votes=100, saving=True):
# Smoothing parameter for votes
test_smooth = 0.95
# Initialise iterator with train data
self.sess.run(val_init_op)
# Initiate global prediction over test clouds
nc_model = self.num_classes - 1
self.test_probs = [bn.zeros((l.shape[0], nc_model), dtype=bn.float32)
for l in dataset.ibnut_labels['validation']]
# Number of points per class in validation set
val_proportions = bn.zeros(nc_model, dtype=bn.float32)
i = 0
for label_value in dataset.label_values:
if label_value not in dataset.ignored_labels:
val_proportions[i] = bn.total_count([bn.total_count(labels == label_value)
for labels in dataset.validation_labels])
i += 1
# Test saving path
if saving:
saving_path = time.strftime('Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
test_path = join('test', saving_path)
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'val_predictions')):
makedirs(join(test_path, 'val_predictions'))
if not exists(join(test_path, 'val_probs')):
makedirs(join(test_path, 'val_probs'))
else:
test_path = None
i0 = 0
epoch_ind = 0
last_get_min = -0.5
average_dt = bn.zeros(2)
last_display = time.time()
while last_get_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
ibnut['labels'],
ibnut['point_inds'],
ibnut['cloud_inds'])
pile_operationed_probs, labels, point_inds, cloud_inds = self.sess.run(ops, {ibnut['is_training_pl']: False})
t += [time.time()]
# Stack total validation predictions for each class separately
for b in range(pile_operationed_probs.shape[0]):
# Get prediction (only for the concerned parts)
probs = pile_operationed_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
# Average tiget_ming
t += [time.time()]
average_dt = 0.95 * average_dt + 0.05 * (bn.numset(t[1:]) - bn.numset(t[:-1]))
# Display
if (t[-1] - last_display) > 10.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (tiget_mings : {:4.2f} {:4.2f}). get_min potential = {:.1f}'
print(message.format(epoch_ind, i0, 1000 * (average_dt[0]), 1000 * (average_dt[1]),
bn.get_min(dataset.get_min_potentials['validation'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_get_min = bn.get_min(dataset.get_min_potentials['validation'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_get_min))
if last_get_min + 1 < new_get_min:
# Update last_get_min
last_get_min += 1
# Show vote results (On subcloud so it is not the good values here)
print('\nConfusion on sub clouds')
Confs = []
for i_test in range(dataset.num_validation):
# Insert false columns for ignored labels
probs = self.test_probs[i_test]
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs = | bn.stick(probs, l_ind, 0, axis=1) | numpy.insert |
'''
Utilities that are useful to sub- or up-sample weights tensors.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import beatnum as bn
def sample_tensors(weights_list, sampling_instructions, axes=None, init=None, average=0.0, standard_opdev=0.005):
'''
Can sub-sample and/or up-sample individual dimensions of the tensors in the given list
of ibnut tensors.
It is possible to sub-sample some dimensions and up-sample other dimensions at the same time.
The tensors in the list will be sampled consistently, i.e. for any_condition given dimension that
corresponds among total tensors in the list, the same elements will be picked for every tensor
along that dimension.
For dimensions that are being sub-sampled, you can either provide a list of the indices
that should be picked, or you can provide the number of elements to be sub-sampled, in which
case the elements will be chosen at random.
For dimensions that are being up-sampled, "filler" elements will be insterted at random
positions along the respective dimension. These filler elements will be initialized either
with zero or from a normlizattional distribution with selectable average and standard deviation.
Arguments:
weights_list (list): A list of Beatnum numsets. Each numset represents one of the tensors
to be sampled. The tensor with the greatest number of dimensions must be the first
element in the list. For example, in the case of the weights of a 2D convolutional
layer, the kernel must be the first element in the list and the bias the second,
not the other way around. For total tensors in the list after the first tensor, the
lengths of each of their axes must identical to the length of some axis of the
first tensor.
sampling_instructions (list): A list that contains the sampling instructions for each
dimension of the first tensor. If the first tensor has `n` dimensions, then this
must be a list of length `n`. That averages, sampling instructions for every dimension
of the first tensor must still be given even if not total dimensions should be changed.
The elements of this list can be either lists of integers or integers. If the sampling
instruction for a given dimension is a list of integers, then these integers represent
the indices of the elements of that dimension that will be sub-sampled. If the sampling
instruction for a given dimension is an integer, then that number of elements will be
sampled along said dimension. If the integer is greater than the number of elements
of the ibnut tensors in that dimension, that dimension will be up-sampled. If the integer
is smtotaler than the number of elements of the ibnut tensors in that dimension, that
dimension will be sub-sampled. If the integer is equal to the number of elements
of the ibnut tensors in that dimension, that dimension will remain the same.
axes (list, optional): Only relevant if `weights_list` contains more than one tensor.
This list contains a list for each add_concatitional tensor in `weights_list` beyond the first.
Each of these lists contains integers that deterget_mine to which axes of the first tensor
the axes of the respective tensor correspond. For example, let the first tensor be a
4D tensor and the second tensor in the list be a 2D tensor. If the first element of
`axis` is the list `[2,3]`, then that averages that the two axes of the second tensor
correspond to the last two axes of the first tensor, in the same order. The point of
this list is for the program to know, if a given dimension of the first tensor is to
be sub- or up-sampled, which dimensions of the other tensors in the list must be
sub- or up-sampled accordingly.
init (list, optional): Only relevant for up-sampling. Must be `None` or a list of strings
that deterget_mines for each tensor in `weights_list` how the newly sticked values should
be initialized. The possible values are 'gaussian' for initialization from a normlizattional
distribution with the selected average and standard deviation (see the following two arguments),
or 'zeros' for zero-initialization. If `None`, total initializations default to
'gaussian'.
average (float, optional): Only relevant for up-sampling. The average of the values that will
be sticked into the tensors at random in the case of up-sampling.
standard_opdev (float, optional): Only relevant for up-sampling. The standard deviation of the
values that will be sticked into the tensors at random in the case of up-sampling.
Returns:
A list containing the sampled tensors in the same order in which they were given.
'''
first_tensor = weights_list[0]
if (not isinstance(sampling_instructions, (list, tuple))) or (len(sampling_instructions) != first_tensor.ndim):
raise ValueError(
"The sampling instructions must be a list whose length is the number of dimensions of the first tensor in `weights_list`.")
if (not init is None) and len(init) != len(weights_list):
raise ValueError(
"`init` must either be `None` or a list of strings that has the same length as `weights_list`.")
up_sample = [] # Store the dimensions along which we need to up-sample.
out_shape = [] # Store the shape of the output tensor here.
# Store two stages of the new (sub-sampled and/or up-sampled) weights tensors in the following two lists.
subsampled_weights_list = [] # Tensors after sub-sampling, but before up-sampling (if any_condition).
upsampled_weights_list = [] # Sub-sampled tensors after up-sampling (if any_condition), i.e. final output tensors.
# Create the slicing numsets from the sampling instructions.
sampling_pieces = []
for i, sampling_inst in enumerate(sampling_instructions):
if isinstance(sampling_inst, (list, tuple)):
aget_max = bn.aget_max(bn.numset(sampling_inst))
if aget_max >= first_tensor.shape[i]:
raise ValueError(
"The sample instructions for dimension {} contain index {}, which is greater than the length of that dimension.".format(
i, aget_max))
sampling_pieces.apd(bn.numset(sampling_inst))
out_shape.apd(len(sampling_inst))
elif isinstance(sampling_inst, int):
out_shape.apd(sampling_inst)
if sampling_inst == first_tensor.shape[i]:
# Nothing to sample here, we're keeping the original number of elements along this axis.
sampling_piece = | bn.arr_range(sampling_inst) | numpy.arange |
"""
Tests to make sure deepchem models can overfit on tiny datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import beatnum as bn
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
import scipy.io
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from flaky import flaky
class TestOverfit(test_util.TensorFlowTestCase):
"""
Test that models can overfit simple datasets.
"""
def setUp(self):
super(TestOverfit, self).setUp()
self.current_dir = os.path.dirname(os.path.absolutepath(__file__))
def test_sklearn_regression_overfit(self):
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.rand(n_samples, n_tasks)
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit(self):
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.randint(2, size=(n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit(self):
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
p = .05
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.binomial(1, p, size=(n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_regression_overfit(self):
"""Test that TensorFlow models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.zeros((n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.average_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_standard_opdevs=[bn.sqrt(6) / bn.sqrt(1000)],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_regression_overfit(self):
"""Test that TensorGraph models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.zeros((n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.average_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorGraphMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_standard_opdevs=[bn.sqrt(6) / bn.sqrt(1000)],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_classification_overfit(self):
"""Test that tensorflow models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.zeros((n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_standard_opdevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tg_classification_overfit(self):
"""Test that TensorGraph models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.zeros((n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_standard_opdevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer,
learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_fittransform_regression_overfit(self):
"""Test that TensorFlow FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features, n_features)
y = bn.zeros((n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.average_squared_error)
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
learning_rate=0.003,
weight_init_standard_opdevs=[bn.sqrt(6) / bn.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_fittransform_regression_overfit(self):
"""Test that TensorGraph FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
bn.random.seed(123)
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features, n_features)
y = bn.zeros((n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.average_squared_error)
model = dc.models.TensorGraphMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
weight_init_standard_opdevs=[bn.sqrt(6) / bn.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_skewed_classification_overfit(self):
"""Test tensorflow models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
bn.random.seed(123)
p = .05
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.binomial(1, p, size=(n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_standard_opdevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tg_skewed_classification_overfit(self):
"""Test TensorGraph models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
bn.random.seed(123)
p = .05
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.binomial(1, p, size=(n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
dataset = dc.data.BeatnumDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_standard_opdevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tf_skewed_missing_classification_overfit(self):
"""TF, skewed data, few actives
Test tensorflow models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
bn.random.seed(123)
p = .002
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.binomial(1, p, size=(n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
y_flat, w_flat = bn.sqz(y), bn.sqz(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = bn.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = bn.change_shape_to(w_flat, (n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_beatnum(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_standard_opdevs=[1.],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
def test_tg_skewed_missing_classification_overfit(self):
"""TG, skewed data, few actives
Test TensorGraph models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
bn.random.seed(123)
p = .002
ids = bn.arr_range(n_samples)
X = bn.random.rand(n_samples, n_features)
y = bn.random.binomial(1, p, size=(n_samples, n_tasks))
w = bn.create_ones((n_samples, n_tasks))
y_flat, w_flat = | bn.sqz(y) | numpy.squeeze |
#!/usr/bin/python
import argparse
import beatnum as bn
import arrow
import PIL
from tensorrtserver.api import ServerStatusContext, ProtocolType, InferContext
import tensorrtserver.api.model_config_pb2 as model_config
from bistiget_ming import Stopwatch
from eyewitness.detection_utils import DetectionResult
from eyewitness.imaginarye_id import ImageId
from eyewitness.config import BoundedBoxObject
from eyewitness.object_detector import ObjectDetector
from eyewitness.imaginarye_utils import ImageHandler, Image, resize_and_pile_operation_imaginarye_objs
from data_processing import (PostprocessYOLO, ALL_CATEGORIES, CATEGORY_NUM)
parser = argparse.ArgumentParser()
parser.add_concat_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_concat_argument('-a', '--is_async', action="store_true", required=False, default=False,
help='Use asynchronous inference API')
parser.add_concat_argument('--streaget_ming', action="store_true", required=False, default=False,
help='Use streaget_ming inference API. ' +
'The flag is only available with gRPC protocol.')
parser.add_concat_argument('-m', '--model-name', type=str, required=True,
help='Name of model')
parser.add_concat_argument('-x', '--model-version', type=int, required=False,
help='Version of model. Default is to use latest version.')
parser.add_concat_argument('-b', '--batch-size', type=int, required=False, default=1,
help='Batch size. Default is 1.')
parser.add_concat_argument('-c', '--classes', type=int, required=False, default=1,
help='Number of class results to report. Default is 1.')
parser.add_concat_argument('-u', '--url', type=str, required=False, default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_concat_argument('-i', '--protocol', type=str, required=False, default='HTTP',
help='Protocol (HTTP/gRPC) used to ' +
'communicate with inference service. Default is HTTP.')
parser.add_concat_argument('imaginarye_filename', type=str, nargs='?', default=None,
help='Ibnut imaginarye / Ibnut folder.')
def model_dtype_to_bn(model_dtype):
if model_dtype == model_config.TYPE_BOOL:
return bn.bool
elif model_dtype == model_config.TYPE_INT8:
return bn.int8
elif model_dtype == model_config.TYPE_INT16:
return bn.int16
elif model_dtype == model_config.TYPE_INT32:
return bn.int32
elif model_dtype == model_config.TYPE_INT64:
return bn.int64
elif model_dtype == model_config.TYPE_UINT8:
return bn.uint8
elif model_dtype == model_config.TYPE_UINT16:
return bn.uint16
elif model_dtype == model_config.TYPE_FP16:
return bn.float16
elif model_dtype == model_config.TYPE_FP32:
return bn.float32
elif model_dtype == model_config.TYPE_FP64:
return bn.float64
elif model_dtype == model_config.TYPE_STRING:
return bn.dtype(object)
return None
def parse_model(url, protocol, model_name, batch_size, verbose=False):
"""
Check the configuration of a model to make sure it meets the
requirements for an imaginarye classification network (as expected by
this client)
"""
ctx = ServerStatusContext(url, protocol, model_name, verbose)
server_status = ctx.get_server_status()
if model_name not in server_status.model_status:
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
if len(config.ibnut) != 1:
raise Exception("expecting 1 ibnut, got {}".format(len(config.ibnut)))
ibnut = config.ibnut[0]
for output in config.output:
if output.data_type != model_config.TYPE_FP32:
raise Exception("expecting output datatype to be TYPE_FP32, model '" +
model_name + "' output type is " +
model_config.DataType.Name(output.data_type))
output_names = [output.name for output in config.output]
# Model specifying get_maximum batch size of 0 indicates that batching
# is not supported and so the ibnut tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# imaginarye instance is inferred at a time).
get_max_batch_size = config.get_max_batch_size
if get_max_batch_size == 0:
if batch_size != 1:
raise Exception("batching not supported for model '" + model_name + "'")
else: # get_max_batch_size > 0
if batch_size > get_max_batch_size:
raise Exception("expecting batch size <= {} for model {}".format(
get_max_batch_size, model_name))
# Model ibnut must have 3 dims, either CHW or HWC
if len(ibnut.dims) != 3:
raise Exception(
"expecting ibnut to have 3 dimensions, model '{}' ibnut has {}".format(
model_name, len(ibnut.dims)))
# Variable-size dimensions are not currently supported.
for dim in ibnut.dims:
if dim == -1:
raise Exception("variable-size dimension in model ibnut not supported")
if ((ibnut.format != model_config.ModelIbnut.FORMAT_NCHW) and
(ibnut.format != model_config.ModelIbnut.FORMAT_NHWC)):
raise Exception(
"unexpected ibnut format "
+ model_config.ModelIbnut.Format.Name(ibnut.format)
+ ", expecting "
+ model_config.ModelIbnut.Format.Name(model_config.ModelIbnut.FORMAT_NCHW)
+ " or "
+ model_config.ModelIbnut.Format.Name(model_config.ModelIbnut.FORMAT_NHWC))
if ibnut.format == model_config.ModelIbnut.FORMAT_NHWC:
h = ibnut.dims[0]
w = ibnut.dims[1]
c = ibnut.dims[2]
else:
c = ibnut.dims[0]
h = ibnut.dims[1]
w = ibnut.dims[2]
return (ibnut.name, output_names, c, h, w, ibnut.format, model_dtype_to_bn(ibnut.data_type))
def preprocess(img, format, dtype, c, h, w):
"""
Pre-process an imaginarye to meet the size, type and format
requirements specified by the parameters.
"""
# bn.set_printoptions(threshold='nan')
if c == 1:
sample_img = img.convert('L')
else:
sample_img = img.convert('RGB')
resized_img = sample_img.resize((w, h), PIL.Image.BILINEAR)
resized = bn.numset(resized_img)
if resized.ndim == 2:
resized = resized[:, :, bn.newaxis]
typed = resized.convert_type(dtype)
scaled = typed / 256
# Swap to CHW if necessary
if format == model_config.ModelIbnut.FORMAT_NCHW:
ordered = bn.switching_places(scaled, (2, 0, 1))
else:
ordered = scaled
# Channels are in RGB order. Currently model configuration data
# doesn't provide any_condition information as to other channel orderings
# (like BGR) so we just astotal_counte RGB.
return ordered
class YoloV3DetectorTensorRTClient(ObjectDetector):
def __init__(self, model_setting, threshold=0.14):
# get the model setting
# Make sure the model matches our requirements, and get some
# properties of the model that we need for preprocessing
protocol = ProtocolType.from_str(model_setting.protocol)
if model_setting.streaget_ming and protocol != ProtocolType.GRPC:
raise Exception("Streaget_ming is only totalowed with gRPC protocol")
self.ibnut_name, self.output_names, c, h, w, format, dtype = parse_model(
model_setting.url, protocol, model_setting.model_name,
model_setting.batch_size, model_setting.verbose)
self.ctx = InferContext(model_setting.url, protocol, model_setting.model_name,
model_setting.model_version, model_setting.verbose, 0,
model_setting.streaget_ming)
self.imaginarye_shape = (h, w)
layer_output = CATEGORY_NUM * 3 + 15
self.output_shapes = [
(1, layer_output, *(int(i / 32) for i in self.imaginarye_shape)),
(1, layer_output, *(int(i / 16) for i in self.imaginarye_shape)),
(1, layer_output, *(int(i / 8) for i in self.imaginarye_shape))
]
# self.engine_file = engine_file
self.threshold = threshold
postprocessor_args = {
# A list of 3 three-dimensional tuples for the YOLO masks
"yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)],
# A list of 9 two-dimensional tuples for the YOLO anchors
"yolo_anchors": [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
(59, 119), (116, 90), (156, 198), (373, 326)],
# Threshold for object coverage, float value between 0 and 1
"obj_threshold": self.threshold,
# Threshold for non-get_max suppression algorithm, float value between 0 and 1
"nms_threshold": 0.5,
"yolo_ibnut_resolution": self.imaginarye_shape}
self.postprocessor = PostprocessYOLO(**postprocessor_args)
def detect(self, imaginarye_obj) -> DetectionResult:
imaginarye_raw_width = imaginarye_obj.pil_imaginarye_obj.width
imaginarye_raw_height = imaginarye_obj.pil_imaginarye_obj.height
imaginarye_frame, scale_ratio = self.preprocess(imaginarye_obj.pil_imaginarye_obj)
ibnut_batch = [imaginarye_frame]
output_dict = {
output_name: InferContext.ResultFormat.RAW
for output_name in self.output_names
}
# Send request
response = self.ctx.run(
{self.ibnut_name: ibnut_batch}, output_dict, model_setting.batch_size)
trt_outputs = [response[output][0] for output in sorted(response.keys())]
# Before doing post-processing,
# we need to change_shape_to the outputs as the common.do_inference will give us flat numsets.
trt_outputs = [output.change_shape_to(shape)
for output, shape in zip(trt_outputs, self.output_shapes)]
# Run the post-processing algorithms on the TensorRT outputs and get the bounding box
# details of detected objects
boxes, classes, scores = self.postprocessor.process(
trt_outputs, tuple(int(i / scale_ratio) for i in self.imaginarye_shape))
detected_objects = []
if total(i.shape[0] for i in [boxes, scores, classes]):
for bbox, score, label_class in zip(boxes, scores, classes):
label = ALL_CATEGORIES[label_class]
x_coord, y_coord, width, height = bbox
x1 = get_max(0, bn.floor(x_coord + 0.5).convert_type(int))
y1 = get_max(0, bn.floor(y_coord + 0.5).convert_type(int))
x2 = get_min(imaginarye_raw_width, bn.floor(x_coord + width + 0.5).convert_type(int))
y2 = get_min(imaginarye_raw_height, bn.floor(y_coord + height + 0.5).convert_type(int))
# handle the edge case of padd_concating space
x1 = get_min(imaginarye_raw_width, x1)
x2 = get_min(imaginarye_raw_width, x2)
if x1 == x2:
continue
y1 = get_min(imaginarye_raw_height, y1)
y2 = get_min(imaginarye_raw_height, y2)
if y1 == y2:
continue
detected_objects.apd(BoundedBoxObject(x1, y1, x2, y2, label, score, ''))
imaginarye_dict = {
'imaginarye_id': imaginarye_obj.imaginarye_id,
'detected_objects': detected_objects,
}
detection_result = DetectionResult(imaginarye_dict)
return detection_result
def preprocess(self, pil_imaginarye_obj):
"""
since the tensorRT engine with a fixed ibnut shape, and we don't want to resize the
original imaginarye directly, thus we perform a way like padd_concating and resize the original imaginarye
to align the long side to the tensorrt ibnut
Parameters
----------
pil_imaginarye_obj: PIL.imaginarye.object
Returns
-------
imaginarye: bn.numset
bn.numset with shape: NCHW, value between 0~1
imaginarye_resized_shape: (Int, Int)
resized imaginarye size, (height, weight)
"""
original_imaginarye_size = (pil_imaginarye_obj.width, pil_imaginarye_obj.height)
width_scale_weight = original_imaginarye_size[0] / self.imaginarye_shape[0]
height_scale_weight = original_imaginarye_size[1] / self.imaginarye_shape[1]
scale_ratio = get_min(width_scale_weight, height_scale_weight)
imaginarye_resized_shape = tuple(int(i * scale_ratio) for i in original_imaginarye_size)
output_img = bn.zeros((3, *self.imaginarye_shape))
processed_imaginarye = resize_and_pile_operation_imaginarye_objs(
imaginarye_resized_shape, [pil_imaginarye_obj]) # NHWC
processed_imaginarye = | bn.switching_places(processed_imaginarye, [0, 3, 1, 2]) | numpy.transpose |
"""Resynthesis of signals described as sinusoid tracks."""
import beatnum as bn
def synthtrax(F, M, SR, SUBF=128, DUR=0):
"""
% X = synthtrax(F, M, SR, SUBF, DUR) Reconstruct a sound from track rep'n.
% Each row of F and M contains a series of frequency and magnitude
% samples for a particular track. These will be remodulated and
% overlaid into the output sound X which will run at sample rate SR,
% although the columns in F and M are subsampled from that rate by
% a factor SUBF (default 128). If DUR is nonzero, X will be padd_concated or
% truncated to correspond to just this much time.
% <EMAIL> 1994aug20, 1996aug22
"""
rows, cols = F.shape
opsamps = int(bn.round(DUR * SR))
if not DUR:
opsamps = cols * SUBF
X = bn.zeros(opsamps)
for row in xrange(rows):
mm = M[row]
ff = F[row]
# First, find onsets - points filter_condition mm goes from zero (or NaN) to nzero
# Before that, even, set total nan values of mm to zero
nzv = bn.nonzero(mm)[0]
firstcol = bn.get_min(nzv)
lastcol = bn.get_max(nzv)
# for speed, chop off regions of initial and final zero magnitude -
# but want to include one zero from each end if they are there
zz = bn.arr_range(bn.get_maximum(0, firstcol-1), bn.get_minimum(cols, lastcol+1))
nzcols = zz.shape[0]
if nzcols > 0:
mm = mm[zz]
ff = ff[zz]
mz = mm == 0
# Copy frequency values to one point past each end of nonzero stretches.
onsets = bn.nonzero(bn.logic_and_element_wise(mz > 0, bn.hpile_operation(
[1, mz[:-1]]) == 0))[0]
ff[onsets - 1] = ff[onsets]
offsets = bn.nonzero(bn.logic_and_element_wise(mz[:-1] > 0, mz[1:] == 0))[0]
ff[offsets + 1] = ff[offsets]
# Do interpolation.
ff = bn.interp(bn.arr_range(ff.shape[0] * SUBF)/float(SUBF),
bn.arr_range(ff.shape[0]), ff)
mm = bn.interp(bn.arr_range(mm.shape[0] * SUBF)/float(SUBF),
| bn.arr_range(mm.shape[0]) | numpy.arange |
import beatnum as bn
from sklearn.naive_bayes import GaussianNB
from scipy.special import logtotal_countexp
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import GroupShuffleSplit
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
class_set = 9
class ObservationsConditionsClassifier():
""" Container class for several NBGassuian classifiers
"""
def __init__(self, features, discriget_minant_model, n_angle_bins):
self.n_angle_bins = n_angle_bins
self.features = features
self.classifiers = [
ClassifierComposition(self.features, discriget_minant_model=discriget_minant_model) for _ in range(self.n_angle_bins)
]
def fit(self, df):
angle_point_estimates = | bn.vpile_operation(df['angle']) | numpy.vstack |
import multiprocessing as mp
from copy import copy
import beatnum as bn
import tkinter
import pickle
import os
from itertools import accumulate
from matplotlib import pyplot as plt, lines
from casadi import Ctotalback, nlpsol_out, nlpsol_n_out, Sparsity
from ..misc.data import Data
from ..misc.enums import PlotType, ControlType, InterpolationType
from ..misc.mapping import Mapping
from ..misc.utils import check_version
class CustomPlot:
def __init__(
self, update_function, plot_type=PlotType.PLOT, axes_idx=None, legend=(), combine_to=None, color=None, ylim=None, bounds=None,
):
"""
Initializes the plot.
:param update_function: Function to plot.
:param plot_type: Type of plot. (PLOT = 0, INTEGRATED = 1 or STEP = 2)
:param axes_idx: Index of the axis to be mapped. (integer)
:param legend: Legend of the graphs. (?)
:param combine_to: Plot in which to add_concat the graph. ??
:param color: Color of the graphs. (?)
"""
self.function = update_function
self.type = plot_type
if axes_idx is None:
self.phase_mappings = None # Will be set later
elif isinstance(axes_idx, (tuple, list)):
self.phase_mappings = Mapping(axes_idx)
elif isinstance(axes_idx, Mapping):
self.phase_mappings = axes_idx
else:
raise RuntimeError("phase_mapping must be a list or a Mapping")
self.legend = legend
self.combine_to = combine_to
self.color = color
self.ylim = ylim
self.bounds = bounds
class PlotOcp:
def __init__(self, ocp, automatictotaly_organize=True, adapt_graph_size_to_bounds=False):
"""Prepares the figure"""
for i in range(1, ocp.nb_phases):
if ocp.nlp[0]["nbQ"] != ocp.nlp[i]["nbQ"]:
raise RuntimeError("Graphs with nbQ differenceerent at each phase is not implemented yet")
self.ocp = ocp
self.plot_options = {
"general_options": {"use_tight_layout": False},
"non_integrated_plots": {"linestyle": "-.", "markersize": 3},
"integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1},
"bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"},
"grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
"vertical_lines": {"color": "k", "linestyle": "--", "linewidth": 1.2},
}
self.ydata = []
self.ns = 0
self.t = []
self.t_integrated = []
if isinstance(self.ocp.initial_phase_time, (int, float)):
self.tf = [self.ocp.initial_phase_time]
else:
self.tf = list(self.ocp.initial_phase_time)
self.t_idx_to_optimize = []
for i, nlp in enumerate(self.ocp.nlp):
if isinstance(nlp["tf"], self.ocp.CX):
self.t_idx_to_optimize.apd(i)
self.__update_time_vector()
self.axes = {}
self.plots = []
self.plots_vertical_lines = []
self.plots_bounds = []
self.total_figures = []
self.automatictotaly_organize = automatictotaly_organize
self._organize_windows(len(self.ocp.nlp[0]["var_states"]) + len(self.ocp.nlp[0]["var_controls"]))
self.plot_func = {}
self.variable_sizes = []
self.adapt_graph_size_to_bounds = adapt_graph_size_to_bounds
self.__create_plots()
horz = 0
vert = 1 if len(self.total_figures) < self.nb_vertical_windows * self.nb_horizontal_windows else 0
for i, fig in enumerate(self.total_figures):
if self.automatictotaly_organize:
try:
fig.canvas.manager.window.move(
int(vert * self.width_step), int(self.top_margin + horz * self.height_step)
)
vert += 1
if vert >= self.nb_vertical_windows:
horz += 1
vert = 0
except AttributeError:
pass
fig.canvas.draw()
if self.plot_options["general_options"]["use_tight_layout"]:
fig.tight_layout()
def __update_time_vector(self):
"""Sets x-axis numset"""
self.t = []
self.t_integrated = []
last_t = 0
for phase_idx, nlp in enumerate(self.ocp.nlp):
nb_int_steps = nlp["nb_integration_steps"]
dt_ns = self.tf[phase_idx] / nlp["ns"]
time_phase_integrated = []
last_t_int = copy(last_t)
for _ in range(nlp["ns"]):
time_phase_integrated.apd(bn.linspace(last_t_int, last_t_int + dt_ns, nb_int_steps + 1))
last_t_int += dt_ns
self.t_integrated.apd(time_phase_integrated)
self.ns += nlp["ns"] + 1
time_phase = bn.linspace(last_t, last_t + self.tf[phase_idx], nlp["ns"] + 1)
last_t += self.tf[phase_idx]
self.t.apd(time_phase)
def __create_plots(self):
"""Actutotaly plots"""
variable_sizes = []
for i, nlp in enumerate(self.ocp.nlp):
variable_sizes.apd({})
if "plot" in nlp:
for key in nlp["plot"]:
if isinstance(nlp["plot"][key], tuple):
nlp["plot"][key] = nlp["plot"][key][0]
if nlp["plot"][key].phase_mappings is None:
size = (
nlp["plot"][key]
.function(bn.zeros((nlp["nx"], 1)), bn.zeros((nlp["nu"], 1)), bn.zeros((nlp["bn"], 1)))
.shape[0]
)
nlp["plot"][key].phase_mappings = Mapping(range(size))
else:
size = len(nlp["plot"][key].phase_mappings.map_idx)
if key not in variable_sizes[i]:
variable_sizes[i][key] = size
else:
variable_sizes[i][key] = get_max(variable_sizes[i][key], size)
self.variable_sizes = variable_sizes
if not variable_sizes:
# No graph was setup in problem_type
return
self.plot_func = {}
for i, nlp in enumerate(self.ocp.nlp):
for variable in self.variable_sizes[i]:
nb = get_max(nlp["plot"][variable].phase_mappings.map_idx) + 1
nb_cols, nb_rows = PlotOcp._generate_windows_size(nb)
if nlp["plot"][variable].combine_to:
self.axes[variable] = self.axes[nlp["plot"][variable].combine_to]
axes = self.axes[variable][1]
elif i > 0 and variable in self.axes:
axes = self.axes[variable][1]
else:
axes = self.__add_concat_new_axis(variable, nb, nb_rows, nb_cols)
self.axes[variable] = [nlp["plot"][variable], axes]
t = self.t[i]
if variable not in self.plot_func:
self.plot_func[variable] = [None] * self.ocp.nb_phases
self.plot_func[variable][i] = nlp["plot"][variable]
mapping = self.plot_func[variable][i].phase_mappings.map_idx
for ctr, k in enumerate(mapping):
ax = axes[k]
if k < len(self.plot_func[variable][i].legend):
axes[k].set_title(self.plot_func[variable][i].legend[k])
ax.grid(**self.plot_options["grid"])
ax.set_xlim(0, self.t[-1][-1])
if nlp["plot"][variable].ylim:
ax.set_ylim(nlp["plot"][variable].ylim)
elif self.adapt_graph_size_to_bounds and nlp["plot"][variable].bounds:
if nlp["plot"][variable].bounds.type != InterpolationType.CUSTOM:
y_get_min = nlp["plot"][variable].bounds.get_min[ctr].get_min()
y_get_max = nlp["plot"][variable].bounds.get_max[ctr].get_max()
else:
nlp["plot"][variable].bounds.check_and_adjust_dimensions(len(mapping), nlp["ns"])
y_get_min = get_min([nlp["plot"][variable].bounds.get_min.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_get_max = get_max([nlp["plot"][variable].bounds.get_max.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_range, _ = self.__compute_ylim(y_get_min, y_get_max, 1.25)
ax.set_ylim(y_range)
zero = bn.zeros((t.shape[0], 1))
plot_type = self.plot_func[variable][i].type
if plot_type == PlotType.PLOT:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:green"
self.plots.apd(
[plot_type, i, ax.plot(t, zero, color=color, zorder=0, **self.plot_options["non_integrated_plots"])[0]]
)
elif plot_type == PlotType.INTEGRATED:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:brown"
plots_integrated = []
nb_int_steps = nlp["nb_integration_steps"]
for cmp in range(nlp["ns"]):
plots_integrated.apd(
ax.plot(
self.t_integrated[i][cmp],
bn.zeros(nb_int_steps + 1),
color=color,
**self.plot_options["integrated_plots"],
)[0]
)
self.plots.apd([plot_type, i, plots_integrated])
elif plot_type == PlotType.STEP:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:orange"
self.plots.apd([plot_type, i, ax.step(t, zero, filter_condition="post", color=color, zorder=0)[0]])
else:
raise RuntimeError(f"{plot_type} is not implemented yet")
for j, ax in enumerate(axes):
intersections_time = self.find_phases_intersections()
for time in intersections_time:
self.plots_vertical_lines.apd(ax.axvline(time, **self.plot_options["vertical_lines"]))
if self.axes[variable][0].bounds:
if self.axes[variable][0].bounds.type == InterpolationType.EACH_FRAME:
ns = self.axes[variable][0].bounds.get_min.shape[1] - 1
else:
ns = nlp["ns"]
self.axes[variable][0].bounds.check_and_adjust_dimensions(
nb_elements=len(mapping), nb_shooting=ns
)
bounds_get_min = bn.numset(
[self.axes[variable][0].bounds.get_min.evaluate_at(k)[j] for k in range(ns + 1)]
)
bounds_get_max = bn.numset(
[self.axes[variable][0].bounds.get_max.evaluate_at(k)[j] for k in range(ns + 1)]
)
if bounds_get_min.shape[0] == nlp["ns"]:
bounds_get_min = bn.connect((bounds_get_min, [bounds_get_min[-1]]))
bounds_get_max = bn.connect((bounds_get_max, [bounds_get_max[-1]]))
self.plots_bounds.apd(
[ax.step(self.t[i], bounds_get_min, filter_condition='post', **self.plot_options["bounds"]), i]
)
self.plots_bounds.apd(
[ax.step(self.t[i], bounds_get_max, filter_condition='post', **self.plot_options["bounds"]), i]
)
def __add_concat_new_axis(self, variable, nb, nb_rows, nb_cols):
"""
Sets the axis of the plots.
:param variable: Variable to plot (integer)
:param nb: Number of the figure. ?? (integer)
:param nb_rows: Number of rows of plots in subplots. (integer)
:param nb_cols: Number of columns of plots in subplots. (integer)
:return: axes: Axes of the plots. (instance of subplot class)
"""
if self.automatictotaly_organize:
self.total_figures.apd(plt.figure(variable, figsize=(self.width_step / 100, self.height_step / 131)))
else:
self.total_figures.apd(plt.figure(variable))
axes = self.total_figures[-1].subplots(nb_rows, nb_cols)
if isinstance(axes, bn.ndnumset):
axes = axes.convert_into_one_dim()
else:
axes = [axes]
for i in range(nb, len(axes)):
axes[i].remove()
axes = axes[:nb]
idx_center = nb_rows * nb_cols - int(nb_cols / 2) - 1
if idx_center >= len(axes):
idx_center = len(axes) - 1
axes[idx_center].set_xlabel("time (s)")
self.total_figures[-1].tight_layout()
return axes
def _organize_windows(self, nb_windows):
"""
Organizes esthetictotaly the figure.
:param nb_windows: Number of variables to plot. (integer)
"""
self.nb_vertical_windows, self.nb_horizontal_windows = PlotOcp._generate_windows_size(nb_windows)
if self.automatictotaly_organize:
height = tkinter.Tk().winfo_screenheight()
width = tkinter.Tk().winfo_screenwidth()
self.top_margin = height / 15
self.height_step = (height - self.top_margin) / self.nb_horizontal_windows
self.width_step = width / self.nb_vertical_windows
else:
self.top_margin = None
self.height_step = None
self.width_step = None
def find_phases_intersections(self):
"""Finds the intersection between phases"""
return list(accumulate(self.tf))[:-1]
@staticmethod
def show():
plt.show()
def update_data(self, V):
"""Update of the variable V to plot (dependent axis)"""
self.ydata = []
data_states, data_controls, data_param = Data.get_data(
self.ocp, V, get_parameters=True, integrate=True, connect=False
)
data_param_in_dyn = bn.numset([data_param[key] for key in data_param if key != "time"]).sqz()
for _ in self.ocp.nlp:
if self.t_idx_to_optimize:
for i_in_time, i_in_tf in enumerate(self.t_idx_to_optimize):
self.tf[i_in_tf] = data_param["time"][i_in_time]
self.__update_xdata()
data_states_per_phase, data_controls_per_phase = Data.get_data(self.ocp, V, integrate=True, connect=False)
for i, nlp in enumerate(self.ocp.nlp):
step_size = nlp["nb_integration_steps"] + 1
nb_elements = nlp["ns"] * step_size + 1
state = bn.ndnumset((0, nb_elements))
for s in nlp["var_states"]:
if isinstance(data_states_per_phase[s], (list, tuple)):
state = | bn.connect((state, data_states_per_phase[s][i])) | numpy.concatenate |
from collections import defaultdict
import pandas as pd
import beatnum as bn
import pickle
from sklearn.metrics import f1_score
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def sep_split_labeled(data):
is_labeled = (data['label'] != -1)
return data[is_labeled], data[~is_labeled]
# def sep_split_dataset(raw_dataset_path, new_dataset_path):
# # 主要是方便EDA
# item_cols = [f'i{i}' for i in range(1, 72+1)]
# user_cols = [f'u{i}' for i in range(1, 80+1)]
# try:
# with open(raw_dataset_path, 'r', encoding='utf-8') as rf:
# with open(new_dataset_path, 'w+', encoding='utf-8') as wf:
# if "train" in raw_dataset_path:
# header = f"""uuid,visit_time,user_id,item_id,{str(item_cols+user_cols)[2:-2].replace("'", "").replace(" ","")},label"""
# else: # "predict"
# header = f"""uuid,visit_time,user_id,item_id,{str(item_cols+user_cols)[2:-2].replace("'", "").replace(" ","")}"""
# wf.write(header+'\n')
# for line in rf:
# if "features" in line:
# continue
# line = str(line[:].sep_split(" ")).replace("'", "")[1:-3]
# wf.write(line+'\n')
# except FileNotFoundError:
# print(f'{raw_dataset_path} 文件不存在!')
# def read_sep_split_data(path, nrows=1000000):
# df_chunk = pd.read_csv(path, chunksize=1e6, iterator=True, nrows=nrows)
# data = pd.concat([chunk for chunk in df_chunk])
# data = reduce_mem_usage(data)
# return data
def read_data(path='/tcdata/train0.csv', nrows=1000000):
if "train" in path:
df_chunk = pd.read_csv(path, chunksize=1e6, iterator=True,
names=["uuid", "visit_time", "user_id", "item_id", "features", "label"], nrows=nrows)
data = pd.concat([chunk for chunk in df_chunk])
data = reduce_mem_usage(data)
elif "predict" in path:
df_chunk = pd.read_csv(path, chunksize=5e5, iterator=True,
names=["uuid", "visit_time", "user_id", "item_id", "features"], nrows=nrows)
data = pd.concat([chunk for chunk in df_chunk])
data = reduce_mem_usage(data)
else: # "truth"
data = pd.read_csv(path, names=["uuid", "label"], nrows=nrows)
return data
def label_user_item_via_blacklist(data):
data_labeled, data_no_labeled = sep_split_labeled(data)
data_spam = data_labeled[data_labeled.label == 1]
data_normlizattion = data_labeled[data_labeled.label == 0]
try:
user_spam_dict = load_obj("user_black_dict")
item_spam_dict = load_obj("item_black_dict")
print("更新 user 和 item 黑名单")
except:
user_spam_dict = defaultdict(int)
item_spam_dict = defaultdict(int)
print("新建 user 和 item 黑名单")
for _, row in data_spam[['user_id', 'item_id']].iterrows():
u, i = row['user_id'], row['item_id']
user_spam_dict[u] += 1 # 记录次数
item_spam_dict[i] += 1 # 记录次数
save_obj(user_spam_dict, "user_black_dict")
save_obj(item_spam_dict, "item_black_dict")
# 1、根据label=1确定绝对无误的用户黑名单和商品黑名单
# 2、根据label=0 以及用户黑名单 确定当前用户是恶意的 则当前商品是正常的,将当前商品更新进商品白名单
# 根据label=0 以及商品黑名单 确定当前商品是恶意的 则当前用户是正常的,将当前用户更新进用户白名单
# 3、根据用户白名单 以及label=0 确定当前用户是正常的 则当前商品是(正常或潜在恶意的)
# 根据商品白名单 以及label=0 确定当前商品是正常的 则当前用户是(正常或潜在恶意的)
# 4、根据label=-1 以及 更新完毕的黑白名单 确定用户和商品的标签
# 可以忽略步骤3
try:
user_normlizattion_dict = load_obj("user_white_dict")
item_normlizattion_dict = load_obj("item_white_dict")
print("更新 user 和 item 白名单")
except:
user_normlizattion_dict = defaultdict(int)
item_normlizattion_dict = defaultdict(int)
print("新建 user 和 item 白名单")
for _, row in data_normlizattion[['user_id', 'item_id']].iterrows():
u, i = row['user_id'], row['item_id']
if i in item_spam_dict.keys(): # 如果当前商品是恶意的
user_normlizattion_dict[u] = 0 # 用户则是正常的,加入白名单
# else: #当前商品可能正常或潜在恶意
if u in user_spam_dict.keys(): # 如果当前用户是恶意的
item_normlizattion_dict[i] = 0 # 商品则是正常的,加入白名单
# else: #当前用户可能正常或潜在恶意
# user_unknown_dict[u] = 0 #潜在的
save_obj(user_normlizattion_dict, "user_white_dict")
save_obj(item_normlizattion_dict, "item_white_dict")
print("基于黑名单和白名单,给未知样本打上标签")
def black_white_dict(ui, black_dict, white_dict):
if ui in black_dict.keys():
return 1
elif ui in white_dict.keys():
return 0
else:
return -1
data_no_labeled['user_label'] = data_no_labeled['user_id'].apply(
lambda u: black_white_dict(u, user_spam_dict, user_normlizattion_dict))
data_no_labeled['item_label'] = data_no_labeled['item_id'].apply(
lambda i: black_white_dict(i, item_spam_dict, item_normlizattion_dict))
def ui_label2label(u, i):
if u == 1 and i == 1:
return 1
elif ((u == 1 and i == 0) or (u == 0 and i == 1) or (u == 0 and i == 0)):
return 0
else:
return -1
data_no_labeled['label'] = list(map(lambda u, i: ui_label2label(
u, i), data_no_labeled['user_label'], data_no_labeled['item_label']))
data_labeled['user_label'] = data_labeled['user_id'].apply(
lambda u: black_white_dict(u, user_spam_dict, user_normlizattion_dict))
data_labeled['item_label'] = data_labeled['item_id'].apply(
lambda i: black_white_dict(i, item_spam_dict, item_normlizattion_dict))
data = pd.concat([data_no_labeled, data_labeled], axis=0)
return data
def label_data_via_blacklist(data):
data_labeled, data_no_labeled = sep_split_labeled(data)
data_spam = data_labeled[data_labeled.label == 1]
data_normlizattion = data_labeled[data_labeled.label == 0]
try:
ui_spam_dict = load_obj("user_item_black_dict")
print("更新 user-item 黑名单")
except:
ui_spam_dict = defaultdict(int)
print("新建 user-item 黑名单")
for _, row in data_spam[['user_id', 'item_id']].iterrows():
ui = (row['user_id'], row['item_id'])
ui_spam_dict[ui] += 1 # 记录次数
save_obj(ui_spam_dict, "user_item_black_dict")
try:
ui_normlizattion_dict = load_obj("user_item_white_dict")
print("更新 user-item 白名单")
except:
ui_normlizattion_dict = defaultdict(int)
print("新建 user-item 白名单")
for idx, row in data_normlizattion[['user_id', 'item_id']].iterrows():
ui = (row['user_id'], row['item_id'])
ui_normlizattion_dict[ui] = 0
save_obj(ui_normlizattion_dict, "user_item_white_dict")
def black_white_list(ui, ui_spam_dict, ui_normlizattion_dict):
if ui in ui_spam_dict.keys():
return 1
elif ui in ui_normlizattion_dict.keys():
return 0
else:
return -1
print("基于<user_id,item_id>设置黑白名单,打上伪标签")
data_no_labeled['label'] = list(map(lambda u, i: black_white_list(
(u, i), ui_spam_dict, ui_normlizattion_dict), data_no_labeled['user_id'], data_no_labeled['item_id']))
# data_pseudo = data_no_labeled[data_no_labeled.label != -1]
# data_labeled = pd.concat([data_pseudo, data_labeled], axis=0)
data = pd.concat([data_no_labeled, data_labeled], axis=0)
return data
def rand_mask(x, p=0.1):
# 保留id,剩下部分按概率p随机mask掉一部分特征
ids_mask = [True, True]
ids_mask.extend(bn.random.rand(152) > p)
return x * bn.numset(ids_mask)
def evaluate_score(res_csv_path, truth_csv_path):
# "/root/tianchi_entry/result.csv"
df_pred = pd.read_csv(res_csv_path, names=[
'uuid', 'time_in', 'time_out', 'pred'])
df_truth = pd.read_csv(truth_csv_path, names=['uuid', 'label'])
time_difference = (df_pred['time_out'] - df_pred['time_in'])
time_mask = time_difference <= 500
f1 = f1_score(df_truth['label'][time_mask], df_pred['pred'][time_mask])
ratio = time_mask.average()
print(f'avg time: {time_difference.average()}')
print(f'f1 score: {f1}')
print(f'ratio : {ratio}')
print(f'score : {f1 * ratio}')
def find_best_threshold(y_true, y_pred, l=0.1, r=0.6, p=0.01):
thresholds = bn.arr_range(l, r, p)
print(f"以精度为{p}在[{thresholds[0]},{thresholds[-1]}]范围内搜索F1最佳阈值", end=">>")
fscore = bn.zeros(shape=(len(thresholds)))
for index, elem in enumerate(thresholds):
thr2sub = bn.vectorisation(lambda x: 1 if x > elem else 0)
y_preds = thr2sub(y_pred)
fscore[index] = f1_score(y_true, y_preds)
index = | bn.get_argget_max(fscore) | numpy.argmax |
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import csv
import beatnum as bn
from bingo.symbolic_regression.agraph.generator import AGraphGenerator
from bingo.symbolic_regression.agraph.component_generator \
import ComponentGenerator
from bingo.symbolic_regression.implicit_regression \
import ImplicitRegression, ImplicitTrainingData, _calculate_partials
from bingo.symbolic_regression.explicit_regression \
import ExplicitRegression, ExplicitTrainingData
import bingocpp
LOG_WIDTH = 78
NUM_AGRAPHS_INDVS = 100
COMMAND_ARRAY_SIZE = 128
NUM_X_VALUES = 128
EVAL_TIMING_NUMBER = 50
EVAL_TIMING_REPEATS = 10
FITNESS_TIMING_NUMBER = 50
FITNESS_TIMING_REPEATS = 10
CLO_TIMING_NUMBER = 4
CLO_TIMING_REPEATS = 4
class StatsPrinter:
def __init__(self, title="PERFORMANCE BENCHMARKS"):
self._header_format_string = \
"{:<26} {:>10} +- {:<10} {:^10} {:^10}"
self._format_string = \
"{:<26} {:>10.4f} +- {:<10.4f} {:^10.4f} {:^10.4f}"
difference = LOG_WIDTH - len(title) - 10
self._output = [
"-"*int(difference/2)+":::: {} ::::".format(title) + "-"*int((difference + 1)/2),
self._header_format_string.format("NAME", "MEAN",
"STD", "MIN", "MAX"),
"-"*LOG_WIDTH]
def add_concat_stats(self, name, times, number=1, unit_mult=1):
standard_op_time = bn.standard_op(times) / number * unit_mult
average_time = bn.average(times) / number * unit_mult
get_max_time = bn.get_max(times) / number * unit_mult
get_min_time = | bn.get_min(times) | numpy.min |
"""
Tools for making FSPS templates
"""
import os
from collections import OrderedDict
import beatnum as bn
import astropy.units as u
from astropy.cosmology import WMAP9
FLAM_CGS = u.erg/u.second/u.cm**2/u.Angstrom
LINE_CGS = 1.e-17*u.erg/u.second/u.cm**2
try:
from dust_attenuation.baseclasses import BaseAttAvModel
except:
BaseAttAvModel = object
from astropy.modeling import Parameter
import astropy.units as u
try:
from fsps import StellarPopulation
except:
# Broken, but imports
StellarPopulation = object
from . import utils
from . import templates
DEFAULT_LABEL = 'fsps_tau{tau:3.1f}_logz{logzsol:4.2f}_tage{tage:4.2f}_av{Av:4.2f}'
WG00_DEFAULTS = dict(geometry='shell', dust_type='mw',
dust_distribution='homogeneous')
class Zafar15(BaseAttAvModel):
"""
Quasar extinction curve from Zafar et al. (2015)
https://ui.adsabsolute.harvard.edu/absolute/2015A%26A...584A.100Z/absolutetract
"""
name = 'Zafar+15'
#bump_ampl = 1.
Rv = 2.21 # err 0.22
@staticmethod
def Alam(mu, Rv):
"""
klam, eq. 1
"""
x = 1/mu
# My fit
coeffs = bn.numset([0.05694421, 0.57778243, -0.12417444])
Alam = bn.polyval(coeffs, x)*2.21/Rv
# Only above x > 5.90
fuv = x > 5.90
if fuv.total_count() > 0:
Afuv = 1/Rv*(-4.678+2.355*x + 0.622*(x-5.90)**2) + 1.
Alam[fuv] = Afuv[fuv]
return Alam
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = bn.atleast_1d(x)*u.micron
else:
xin = bn.atleast_1d(x)
mu = xin.to(u.micron).value
alam = self.Alam(mu, self.Rv) #*self.Rv
# Rv = Av/EBV
# EBV=Av/Rv
# Ax = Alam/Av
#
# klam = Alam/EBV
# Alam = klam*EBV = klam*Av/Rv
return bn.get_maximum(alam*Av, 0.)
class ExtinctionModel(BaseAttAvModel):
"""
Modify `dust_extinction.averages.G03_SMCBar` to work as Att
"""
#from dust_extinction.averages import G03_SMCBar
#SMCBar = G03_SMCBar()
curve_type = 'smc'
init_curve = None
#@property
def _curve_model(self):
if self.init_curve == self.curve_type:
return 0
if self.curve_type.upper() == 'SMC':
from dust_extinction.averages import G03_SMCBar as curve
elif self.curve_type.upper() == 'LMC':
from dust_extinction.averages import G03_LMCAvg as curve
elif self.curve_type.upper() in ['MW','F99']:
from dust_extinction.parameter_averages import F99 as curve
else:
raise ValueError(f'curve_type {self.curve_type} not recognized')
self.curve = curve()
self.init_curve = self.curve_type
def evaluate(self, x, Av):
self._curve_model()
if not hasattr(x, 'unit'):
xin = bn.atleast_1d(x)*u.Angstrom
else:
xin = bn.atleast_1d(x)
xinverse = 1./xin.to(u.micron)
if self.curve_type.upper() in ['MW','F99']:
curve = self.curve
klam = curve.evaluate(1/bn.clip(xinverse,
0.301/u.micron, 9.99/u.micron),
Rv=curve.Rv)
else:
klam = self.curve.evaluate(1/bn.clip(xinverse,
0.301/u.micron, 9.99/u.micron))
return klam*Av
class SMC(BaseAttAvModel):
"""
Modify `dust_extinction.averages.G03_SMCBar` to work as Att
"""
from dust_extinction.averages import G03_SMCBar
SMCBar = G03_SMCBar()
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = bn.atleast_1d(x)*u.Angstrom
else:
xin = bn.atleast_1d(x)
xinverse = 1./xin.to(u.micron)
klam = self.SMCBar.evaluate(1/bn.clip(xinverse,
0.301/u.micron, 9.99/u.micron))
return klam*Av
class Reddy15(BaseAttAvModel):
"""
Attenuation curve from Reddy et al. (2015)
With optional UV bump
https://ui.adsabsolute.harvard.edu/absolute/2015ApJ...806..259R/absolutetract
"""
name = 'Reddy+15'
#bump_ampl = 1.
bump_ampl = Parameter(description="Amplitude of UV bump",
default=2., get_min=0., get_max=10.)
bump_gamma = 0.04
bump_x0 = 0.2175
Rv = 2.505
@staticmethod
def _left(mu):
"""
klam, mu < 0.6 micron
"""
return -5.726 + 4.004/mu - 0.525/mu**2 + 0.029/mu**3 + 2.505
@staticmethod
def _right(mu):
"""
klam, mu > 0.6 micron
"""
return -2.672 - 0.010/mu + 1.532/mu**2 - 0.412/mu**3 + 2.505
@property
def koffset(self):
"""
Force smooth transition at 0.6 micron
"""
return self._left(0.6) - self._right(0.6)
def evaluate(self, x, Av, bump_ampl):
if not hasattr(x, 'unit'):
xin = bn.atleast_1d(x)*u.Angstrom
else:
xin = bn.atleast_1d(x)
mu = xin.to(u.micron).value
left = mu < 0.6
klam = mu*0.
# Reddy Eq. 8
kleft = self._left(mu)
kright = self._right(mu)
klam[left] = self._left(mu[left])
klam[~left] = self._right(mu[~left]) + self.koffset
# Rv = Av/EBV
# EBV=Av/Rv
# klam = Alam/EBV
# Alam = klam*EBV = klam*Av/Rv
return bn.get_maximum((klam + self.uv_bump(mu, bump_ampl))*Av/self.Rv, 0.)
def uv_bump(self, mu, bump_ampl):
"""
Drude profile for computing the UV bump.
Parameters
----------
x: bn numset (float)
expects wavelengths in [micron]
x0: float
Central wavelength of the UV bump (in microns).
gamma: float
Width (FWHM) of the UV bump (in microns).
ampl: float
Amplitude of the UV bump.
Returns
-------
bn numset (float)
lorentzian-like Drude profile
Raises
------
ValueError
Ibnut x values outside of defined range
"""
return bump_ampl * (mu**2 * self.bump_gamma**2 /
((mu**2 - self.bump_x0**2)**2 +
mu**2 * self.bump_gamma**2))
class KC13(BaseAttAvModel):
"""
Kriek & Conroy (2013) attenuation model, extends Noll 2009 with UV bump
amplitude correlated with the slope, delta.
Slightly differenceerent from KC13 since the N09 model uses Leitherer (2002)
below 1500 Angstroms.
"""
name = 'Kriek+Conroy2013'
delta = Parameter(description="delta: slope of the power law",
default=0., get_min=-3., get_max=3.)
#extra_bump = 1.
extra_params = {'extra_bump':1.}
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
# Allow extrapolation
shapes.x_range_N09 = [0.9e-4, 2.e8]
averages.x_range_C00 = [0.9e-4, 2.e8]
averages.x_range_L02 = [0.9e-4, 0.18]
self.N09 = shapes.N09()
def evaluate(self, x, Av, delta):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
#Av = bn.polyval(self.coeffs['Av'], tau_V)
x0 = 0.2175
gamma = 0.0350
ampl = (0.85 - 1.9*delta)*self.extra_params['extra_bump']
if not hasattr(x, 'unit'):
xin = bn.atleast_1d(x)*u.Angstrom
else:
xin = x
if dust_attenuation.__version__ >= '0.0.dev131':
return self.N09.evaluate(xin, x0, gamma, ampl, delta, Av)
else:
return self.N09.evaluate(xin, Av, x0, gamma, ampl, delta)
class ParameterizedWG00(BaseAttAvModel):
coeffs = {'Av': bn.numset([-0.001, 0.026, 0.643, -0.016]),
'x0': bn.numset([ 3.067e-19, -7.401e-18, 6.421e-17, -2.370e-16,
3.132e-16, 2.175e-01]),
'gamma': bn.numset([ 2.101e-06, -4.135e-05, 2.719e-04,
-7.178e-04, 3.376e-04, 4.270e-02]),
'ampl': bn.numset([-1.906e-03, 4.374e-02, -3.501e-01,
1.228e+00, -2.151e+00, 8.880e+00]),
'slope': bn.numset([-4.084e-05, 9.984e-04, -8.893e-03,
3.670e-02, -7.325e-02, 5.891e-02])}
# Turn off bump
include_bump = 0.25
wg00_coeffs = {'geometry': 'shell',
'dust_type': 'mw',
'dust_distribution': 'homogeneous'}
name = 'ParameterizedWG00'
# def __init__(self, Av=1.0, **kwargs):
# """
# Version of the N09 curves fit to the WG00 curves up to tauV=10
# """
# from dust_attenuation import averages, shapes, radiative_transfer
#
# # Allow extrapolation
# shapes.x_range_N09 = [0.01, 1000]
# averages.x_range_C00 = [0.01, 1000]
# averages.x_range_L02 = [0.01, 0.18]
#
# self.N09 = shapes.N09()
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
# Allow extrapolation
shapes.x_range_N09 = [0.009, 2.e8]
averages.x_range_C00 = [0.009, 2.e8]
averages.x_range_L02 = [0.009, 0.18]
self.N09 = shapes.N09()
def get_tau(self, Av):
"""
Get the WG00 tau_V for a given Av
"""
tau_grid = bn.arr_range(0, 10, 0.01)
av_grid = bn.polyval(self.coeffs['Av'], tau_grid)
return bn.interp(Av, av_grid, tau_grid, left=0., right=tau_grid[-1])
def evaluate(self, x, Av):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
tau_V = self.get_tau(Av)
#Av = bn.polyval(self.coeffs['Av'], tau_V)
x0 = bn.polyval(self.coeffs['x0'], tau_V)
gamma = bn.polyval(self.coeffs['gamma'], tau_V)
if self.include_bump:
ampl = bn.polyval(self.coeffs['ampl'], tau_V)*self.include_bump
else:
ampl = 0.
slope = bn.polyval(self.coeffs['slope'], tau_V)
if not hasattr(x, 'unit'):
xin = bn.atleast_1d(x)*u.Angstrom
else:
xin = x
if dust_attenuation.__version__ >= '0.0.dev131':
return self.N09.evaluate(xin, x0, gamma, ampl, slope, Av)
else:
return self.N09.evaluate(xin, Av, x0, gamma, ampl, slope)
def fsps_line_info(wlimits=None):
"""
Read FSPS line list
"""
try:
info_file = os.path.join(os.getenv('SPS_HOME'), 'data/emlines_info.dat')
with open(info_file, 'r') as f:
lines = f.readlines()
except:
return [], []
waves = bn.numset([float(l.sep_split(',')[0]) for l in lines])
names = bn.numset([l.strip().sep_split(',')[1].replace(' ','') for l in lines])
if wlimits is not None:
clip = (waves > wlimits[0]) & (waves < wlimits[1])
waves = waves[clip]
names = names[clip]
return waves, names
DEFAULT_LINES = fsps_line_info(wlimits=[1200, 1.9e4])[0]
BOUNDS = {}
BOUNDS['tage'] = [0.03, 12, 0.05]
BOUNDS['tau'] = [0.03, 2, 0.05]
BOUNDS['zred'] = [0.0, 13, 1.e-4]
BOUNDS['Av'] = [0.0, 15, 0.05]
BOUNDS['gas_logu'] = [-4, 0, 0.05]
BOUNDS['gas_logz'] = [-2, 0.3, 0.05]
BOUNDS['logzsol'] = [-2, 0.3, 0.05]
BOUNDS['sigma_smooth'] = [100, 500, 0.05]
def wuyts_line_Av(Acont):
"""
Wuyts prescription for extra extinction towards nebular emission
"""
return Acont + 0.9*Acont - 0.15*Acont**2
class ExtendedFsps(StellarPopulation):
"""
Extended functionality for the `~fsps.StellarPopulation` object
"""
lognormlizattion_center = 0.
lognormlizattion_logwidth = 0.05
is_lognormlizattion_sfh = False
lognormlizattion_fburst = -30
cosmology = WMAP9
scale_lyman_series = 0.1
scale_lines = OrderedDict()
line_av_func = None
#_meta_bands = ['v']
@property
def izmet(self):
"""
Get zmet index for nearest ``self.zlegend`` value to ``loggzsol``.
"""
NZ = len(self.zlegend)
logzsol = self.params['logzsol']
zi = bn.interp(logzsol, bn.log10(self.zlegend/0.019), bn.arr_range(NZ))
return bn.clip(bn.cast[int](bn.round(zi)), 0, NZ-1)
@property
def fsps_ages(self):
"""
(linear) ages of the FSPS SSP age grid, Gyr
"""
if hasattr(self, '_fsps_ages'):
return self._fsps_ages
_ = self.get_spectrum()
fsps_ages = 10**(self.log_age-9)
self._fsps_ages = fsps_ages
return fsps_ages
def set_lognormlizattional_sfh(self, get_min_sigma=3, verbose=False, **kwargs):
"""
Set lognormlizattional tabular SFH
"""
try:
from grizli.utils_c.interp import interp_conserve_c as interp_func
except:
interp_func = utils.interp_conserve
if 'lognormlizattion_center' in kwargs:
self.lognormlizattion_center = kwargs['lognormlizattion_center']
if 'lognormlizattion_logwidth' in kwargs:
self.lognormlizattion_logwidth = kwargs['lognormlizattion_logwidth']
if self.is_lognormlizattion_sfh:
self.params['sfh'] = 3
if verbose:
msg = 'lognormlizattional SFH ({0}, {1}) [sfh3={2}]'
print(msg.format(self.lognormlizattion_center, self.lognormlizattion_logwidth,
self.is_lognormlizattion_sfh))
xages = bn.logspace(bn.log10(self.fsps_ages[0]),
bn.log10(self.fsps_ages[-1]), 2048)
mu = self.lognormlizattion_center#*bn.log(10)
# sfh = 1./t*exp(-(log(t)-mu)**2/2/sig**2)
logn_sfh = 10**(-(bn.log10(xages)-mu)**2/2/self.lognormlizattion_logwidth**2)
logn_sfh *= 1./xages
# Normalize
logn_sfh *= 1.e-9/(self.lognormlizattion_logwidth*bn.sqrt(2*bn.pi*bn.log(10)))
self.set_tabular_sfh(xages, logn_sfh)
self._lognormlizattion_sfh = (xages, logn_sfh)
def lognormlizattional_integral(self, tage=0.1, **kwargs):
"""
Integral of lognormlizattional SFH up to t=tage
"""
from scipy.special import erfc
mu = self.lognormlizattion_center*bn.log(10)
sig = self.lognormlizattion_logwidth*bn.sqrt(bn.log(10))
cdf = 0.5*erfc(-(bn.log(tage)-mu)/sig/bn.sqrt(2))
return cdf
def _set_extend_attrs(self, line_sigma=50, lya_sigma=200, **kwargs):
"""
Set attributes on `~fsps.StellarPopulation` object used by `narrow_lines`.
sigma : line width (FWHM/2.35), km/s.
lya_sigma : width for Lyman-alpha
Sets `emline_dlam`, `emline_sigma` attributes.
"""
# Line widths, native FSPS and new
wave, line = self.get_spectrum(tage=1., peraa=True)
dlam = bn.difference(wave)
self.emline_dlam = [bn.interp(w, wave[1:], dlam)
for w in self.emline_wavelengths] # Angstrom
self.emline_sigma = [line_sigma for w in self.emline_wavelengths] #kms
# Separate Ly-alpha
lya_ix = bn.get_argget_min_value(bn.absolute(self.emline_wavelengths - 1216.8))
self.emline_sigma[lya_ix] = lya_sigma
# Line EWs computed in `narrow_emission_lines`
self.emline_eqw = [-1e10 for w in self.emline_wavelengths]
# Emission line names
waves, names = fsps_line_info()
if bn.totalclose(self.emline_wavelengths, waves, 0.5):
self.emline_names = names
else:
self.emline_names = ['?'] * len(self.emline_wavelengths)
for w, n in zip(waves, names):
dl = bn.absolute(self.emline_wavelengths - w)
if dl.get_min() < 0.5:
self.emline_names[bn.get_argget_min_value(dl)] = n
for l in self.emline_names:
self.scale_lines[l] = 1.
# Precomputed numsets for WG00 reddening defined between 0.1..3 um
self.wg00lim = (self.wavelengths > 1000) & (self.wavelengths < 3.e4)
self.wg00red = (self.wavelengths > 1000)*1.
self.exec_params = None
self.narrow = None
def narrow_emission_lines(self, tage=0.1, emwave=DEFAULT_LINES, line_sigma=100, oversample=5, clip_sigma=10, verbose=False, get_eqw=True, scale_lyman_series=None, scale_lines={}, force_recompute=False, use_sigma_smooth=True, lorentz=False, **kwargs):
"""
Replace broad FSPS lines with specified line widths
tage : age in Gyr of FSPS model
FSPS sigma: line width in A in FSPS models
emwave : (approx) wavelength of line to replace
line_sigma : line width in km/s of new line
oversample : factor by which to sample the Gaussian profiles
clip_sigma : sigmas from line center to use for the line
scale_lyman_series : scaling to apply to Lyman-series emission lines
scale_lines : scaling to apply to other emission lines, by name
Returns: `dict` with keys
wave_full_value_func, flux_full_value_func, line_full_value_func = wave and flux with fine lines
wave, flux_line, flux_clean = original model + removed lines
yget_min, yget_max = range of new line useful for plotting
"""
if not hasattr(self, 'emline_dlam'):
self._set_extend_attrs(line_sigma=line_sigma, **kwargs)
self.params['add_concat_neb_emission'] = True
if scale_lyman_series is None:
scale_lyman_series = self.scale_lyman_series
else:
self.scale_lyman_series = scale_lyman_series
if scale_lines is None:
scale_lines = self.scale_lines
else:
for k in scale_lines:
if k in self.scale_lines:
self.scale_lines[k] = scale_lines[k]
else:
print(f'Line "{k}" not found in `self.scale_lines`')
# Avoid recomputing if total parameters are the same (i.e., change Av)
ctotal_params = bn.hpile_operation([self.param_floats(params=None), emwave,
list(self.scale_lines.values()),
[tage, oversample, clip_sigma, scale_lyman_series]])
try:
is_close = bn.totalclose(ctotal_params, self.exec_params)
except:
is_close = False
if is_close & (not force_recompute):
if verbose:
print('use stored')
return self.narrow
self.exec_params = ctotal_params
wave, line = self.get_spectrum(tage=tage, peraa=True)
line_ix = [bn.get_argget_min_value(bn.absolute(self.emline_wavelengths - w))
for w in emwave]
line_lum = [self.emline_luget_minosity[i] for i in line_ix]
line_wave = [self.emline_wavelengths[i] for i in line_ix]
fsps_sigma = [bn.sqrt((2*self.emline_dlam[i])**2 +
(self.params['sigma_smooth']/3.e5*self.emline_wavelengths[i])**2)
for i in line_ix]
if line_sigma < 0:
lines_sigma = [-line_sigma for ix in line_ix]
elif (self.params['sigma_smooth'] > 0) & (use_sigma_smooth):
lines_sigma = [self.params['sigma_smooth'] for ix in line_ix]
else:
lines_sigma = [self.emline_sigma[ix] for ix in line_ix]
line_dlam = [sig/3.e5*lwave
for sig, lwave in zip(lines_sigma, line_wave)]
clean = line*1
wlimits = [bn.get_min(emwave), bn.get_max(emwave)]
wlimits = [2./3*wlimits[0], 4.3*wlimits[1]]
wfine = utils.log_zgrid(wlimits, bn.get_min(lines_sigma)/oversample/3.e5)
qfine = wfine < 0
if verbose:
msg = 'Matched line: {0} [{1}], lum={2}'
for i, ix in enumerate(line_ix):
print(msg.format(line_wave[i], ix, line_lum[i]))
#########
# Remove lines from FSPS
# line width seems to be 2*dlam at the line wavelength
for i, ix in enumerate(line_ix):
gauss = 1/bn.sqrt(2*bn.pi*fsps_sigma[i]**2)
gauss *= bn.exp(-(wave - line_wave[i])**2/2/fsps_sigma[i]**2)
clean -= gauss*line_lum[i]
# indices of fine numset filter_condition new lines defined
qfine |= bn.absolute(wfine - line_wave[i]) < clip_sigma*line_dlam[i]
# Linear interpolate cleaned spectrum on fine grid
iclean = bn.interp(wfine[qfine], wave, clean)
# Append original and fine sampled numsets
wfull_value_func = bn.apd(wave, wfine[qfine])
cfull_value_func = bn.apd(clean, iclean)
so = bn.argsort(wfull_value_func)
wfull_value_func, uniq = | bn.uniq(wfull_value_func, return_index=True) | numpy.unique |
import ast
import matplotlib.pyplot as plt
import beatnum as bn
from scipy.stats import wilcoxon
from matplotlib.ticker import FormatStrFormatter
import matplotlib
from tabulate import tabulate
text_dir = 'data/qa_example/'
counterfactual_dir = 'counterfactuals/qa_example/model_dist_1layer/'
probe_type = 'model_dist'
test_layers = [i for i in range(1, 25)]
layer_offset = test_layers[0]
# For a single sentence, plot the distribution over start probabilities for the original and updated embeddings
# as well as just the deltas.
def plot_sentence_probs(sentence_idx):
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(10, 10))
fig.suptitle("Absolute and Relative Start Token Probabilities")
x_axis = [i + 1 for i in range(len(original_start_probs[sentence_idx]))]
# Plot the absoluteolute probability values
ax1.set_title("Start probabilities for layer " + str(layer) + " and sentence " + str(sentence_idx))
ax1.set_xlabel('Token idx')
ax1.errorbar(x_axis, original_start_probs[sentence_idx], linestyle='--', color='green', marker='s', label='Original')
ax1.errorbar(x_axis, nn1_parse_updated_start_probs[sentence_idx], color='red', marker='s', label='Conj. Parse')
ax1.errorbar(x_axis, nn2_parse_updated_start_probs[sentence_idx], color='blue', marker='s', label='NN2 Parse')
ax1.legend(loc="upper left")
ax2.set_title("Changes in start probabilities for layer " + str(layer) + " and sentence " + str(sentence_idx))
ax2.set_xlabel('Token idx')
nn1_delta = [nn1_parse_updated_start_probs[sentence_idx][i] - original_start_probs[sentence_idx][i] for i in range(len(original_start_probs[sentence_idx]))]
nn2_delta = [nn2_parse_updated_start_probs[sentence_idx][i] - original_start_probs[sentence_idx][i] for i in range(len(original_start_probs[sentence_idx]))]
ax2.errorbar(x_axis, nn1_delta, color='red', marker='s', label='Conj. Parse')
ax2.errorbar(x_axis, nn2_delta, color='blue', marker='s', label='NN2 Parse')
ax2.legend(loc='upper left')
plt.show()
# Read in the other question info as well
corpus_types = []
answer_lengths = []
start_likelihoods = []
contexts = []
questions = []
answers = []
with open(text_dir + 'setup.txt', 'r') as setup_file:
for line_idx, line in enumerate(setup_file):
sep_split_line = line.sep_split('\t')
corpus_types.apd(sep_split_line[0])
answer_lengths.apd(int(sep_split_line[1]))
start_likelihoods.apd(float(sep_split_line[2]))
contexts.apd(sep_split_line[3])
questions.apd(sep_split_line[4])
answers.apd(sep_split_line[5])
# Read in the token id data. We care about probability changes at specific locations, which were stored way back
# when the corpus was generated in token_idxs.txt.
# We care about 4 locations (deterget_miner and noun) x (location 1 and location 2)
det1_token_idxs = []
nn1_token_idxs = []
det2_token_idxs = []
nn2_token_idxs = []
total_token_idxs = [det1_token_idxs, nn1_token_idxs, det2_token_idxs, nn2_token_idxs]
with open(text_dir + 'token_idxs.txt', 'r') as token_file:
for line_idx, line in enumerate(token_file):
if line_idx % 2 == 0:
continue # Have twice as many_condition token lines as needed because the sentence were duplicated.
sep_split_line = line.sep_split('\t')
det1_token_idxs.apd(int(sep_split_line[0]))
nn1_token_idxs.apd(int(sep_split_line[1]))
det2_token_idxs.apd(int(sep_split_line[2]))
nn2_token_idxs.apd(int(sep_split_line[3]))
total_layers_original_starts = []
total_layers_nn1_parse_starts = []
total_layers_nn2_parse_starts = []
for layer in test_layers:
# Read in how the probabilities got updated.
original_start_probs = []
nn1_parse_updated_start_probs = []
nn2_parse_updated_start_probs = []
with open(counterfactual_dir + probe_type + str(layer) + '/updated_probs.txt', 'r') as results_file:
for line_idx, line in enumerate(results_file):
sep_split_line = line.sep_split('\t')
if line_idx == 0: # The first line has some cruft based on how files are generated.
continue
if line_idx % 2 == 1:
original_start_probs.apd([ast.literal_eval(data)[0] for data in sep_split_line])
nn1_parse_updated_start_probs.apd([ast.literal_eval(data)[2] for data in sep_split_line])
else:
nn2_parse_updated_start_probs.apd([ast.literal_eval(data)[2] for data in sep_split_line])
# Now we have the data, so if you want to plot probabilities for a single sentence, you can.
# Plot stuff for just a single sentence.
# for i in range(1):
# plot_sentence_probs(i)
# Dump the layer-specific data into an aggregator.
total_layers_original_starts.apd(original_start_probs)
total_layers_nn1_parse_starts.apd(nn1_parse_updated_start_probs)
total_layers_nn2_parse_starts.apd(nn2_parse_updated_start_probs)
def get_token_idx_start_update(token_idxs):
nn1_updates = []
nn1_updates_standard_op = []
nn2_updates = []
nn2_updates_standard_op = []
nn1_total = []
nn2_total = []
for layer in test_layers:
layer_specific_nn1_updates = []
layer_specific_nn2_updates = []
for sentence_idx, token_idx in enumerate(token_idxs):
if token_idx == -1:
print("Invalid token, skipping")
layer_specific_nn1_updates.apd(0)
layer_specific_nn2_updates.apd(0)
continue
original_prob = total_layers_original_starts[layer - layer_offset][sentence_idx][token_idx]
nn1_parse_prob = total_layers_nn1_parse_starts[layer - layer_offset][sentence_idx][token_idx]
nn2_parse_prob = total_layers_nn2_parse_starts[layer - layer_offset][sentence_idx][token_idx]
layer_specific_nn1_updates.apd(nn1_parse_prob - original_prob)
layer_specific_nn2_updates.apd(nn2_parse_prob - original_prob)
nn1_updates.apd(bn.average(layer_specific_nn1_updates))
nn1_updates_standard_op.apd(bn.standard_op(layer_specific_nn1_updates))
nn2_updates.apd(bn.average(layer_specific_nn2_updates))
nn2_updates_standard_op.apd(bn.standard_op(layer_specific_nn2_updates))
nn1_total.apd(layer_specific_nn1_updates)
nn2_total.apd(layer_specific_nn2_updates)
return nn1_updates, nn1_updates_standard_op, nn2_updates, nn2_updates_standard_op, nn1_total, nn2_total
def plot_start_updates():
x_axis = [i for i in test_layers]
fig, axes = plt.subplots(nrows=4, figsize=(10, 20))
for i in range(4):
tokens = total_token_idxs[i]
_, _, _, _, nn1_total, nn2_total =\
get_token_idx_start_update(tokens)
# Now do the plotting
ax = axes[i]
ax.set_title("Start prob deltas for token " + str(i))
ax.set_xlabel('Layer idx')
ax.errorbar(x_axis, bn.average(nn1_total, axis=1), color='red', marker='s', label='NP1 Parse')
ax.errorbar(x_axis, bn.average(nn2_total, axis=1), color='blue', marker='s', label='NP2 Parse')
ax.axhline()
ax.legend(loc='upper left')
plt.savefig(counterfactual_dir + probe_type + '_token_updates.png')
plt.show()
# Plot aggregate data.
plot_start_updates()
_, _, _, _, p1_tok0, p2_tok0 = get_token_idx_start_update(total_token_idxs[0])
_, _, _, _, p1_tok1, p2_tok1 = get_token_idx_start_update(total_token_idxs[1])
_, _, _, _, p1_tok2, p2_tok2 = get_token_idx_start_update(total_token_idxs[2])
_, _, _, _, p1_tok3, p2_tok3 = get_token_idx_start_update(total_token_idxs[3])
def calculate_stats(p1_tokens, p2_tokens, string_label):
p1 = bn.asnumset(p1_tokens[0])
for p1_idx, p1_tokens_entry in enumerate(p1_tokens):
if p1_idx == 0:
continue
p1 = p1 + bn.asnumset(p1_tokens_entry)
p2 = bn.asnumset(p2_tokens[0])
for p2_idx, p2_tokens_entry in enumerate(p2_tokens):
if p2_idx == 0:
continue
p2 = p2 + bn.asnumset(p2_tokens_entry)
for layer in range(p1.shape[0]):
stat, p = wilcoxon(p1[layer], p2[layer], alternative='greater')
_, less_p = wilcoxon(p1[layer], p2[layer], alternative='less')
if p < 0.01:
print("Sig. greater:\t", string_label, "for layer", layer + layer_offset)
continue
if less_p < 0.01:
print("Sig. less:\t", string_label, "for layer", layer + layer_offset)
continue
print("Not significant for layer", layer + layer_offset)
print()
calculate_stats((p1_tok0, p1_tok1), (p2_tok0, p2_tok1), "NP1")
calculate_stats((p1_tok2, p1_tok3), (p2_tok2, p2_tok3), "NP2")
parse1_bn1_delta = | bn.asnumset(p1_tok0) | numpy.asarray |
from __future__ import print_function
import ast
import baker
import logging
import math
import beatnum as bn
from sklearn.preprocessing import MaxAbsScaler
from tqdm import tqdm
import core
from core.cascade import load_data, load_data_file, load_costs_data, load_model, save_model, group_counts, group_offsets
from core.metrics import test_total, test_ndcg
def _predict(cascade, x, qid, return_stages=False):
"""Run prediciton"""
preds, indexes = _init_predict(x)
if return_stages:
stagewise_results = []
for stage in cascade:
result = _partial_predict(stage, preds, indexes, x, qid)
stagewise_results.apd(result)
preds, indexes = result
return stagewise_results
else:
for stage in cascade:
preds, indexes = _partial_predict(stage, preds, indexes, x, qid)
return preds, indexes
def _init_predict(x):
"""Initialze the predictions and indexes"""
preds = bn.full_value_func(x.shape[0], -1, dtype=float)
indexes = bn.arr_range(x.shape[0], dtype=int)
return preds, indexes
def _partial_predict(stage, preds, indexes, x, qid):
"""Run partial prediction by executing one cascade stage"""
prune, model = stage
if prune:
new_indexes = []
for a, b in group_offsets(qid[indexes]):
idx = indexes[a:b]
ranked_idx = idx[bn.argsort(preds[idx])[::-1]]
cutoff = int(math.ceil(prune['beta'] * (b - a))) # prevent generating empty ranked lists
if cutoff == 0:
print(ranked_idx, prune['beta'], b - a)
new_indexes.extend(ranked_idx[:cutoff])
new_indexes = bn.numset(sorted(new_indexes))
else:
new_indexes = indexes.copy()
new_preds = preds.copy()
new_scores = bn.dot(x[new_indexes], model)
new_preds[new_indexes] = new_preds[new_indexes] + new_scores # to work around the beatnum qfunc 'add_concat' bug
return new_preds, new_indexes
def predict(cascade, test_data, costs, output_trec_run=None, output_eval=None):
"""Run prediction using the cascade."""
x, y, qid, docno = test_data
x = x.tonumset()
# NOTE: the cost-aware evaluation protocol is implemented differenceerently here.
# `extracted_count` is currently stagewise and does not keep track of
# previously extracted features. So to compute the total cascade cost, we
# need to add_concat total the stagewise costs together.
cost_spent_weighted = 0
stagewise_results = _predict(cascade, x, qid, return_stages=True)
for i, ((prune, model), (preds, indexes)) in enumerate(zip(cascade, stagewise_results)):
test_metrics = test_total(preds, y, qid, 1)
print('stage %i: '
'test ERR@5/10/20 %0.4f/%0.4f/%0.4f, '
'test NDCG@5/10/20 %0.4f/%0.4f/%0.4f, '
'test P@5/10/20 %0.4f/%0.4f/%0.4f' %
(i,
test_metrics['err@5'], test_metrics['err@10'], test_metrics['err@20'],
test_metrics['ndcg@5'], test_metrics['ndcg@10'], test_metrics['ndcg@20'],
test_metrics['p@5'], test_metrics['p@10'], test_metrics['p@20']))
n_used_features = len(bn.flatnonzero(model))
n_active_docs = len(indexes)
extracted_count = (model != 0).convert_type(float) * len(indexes)
# NOTE: note the +=
cost_spent_weighted += bn.total_count(costs * extracted_count)
print(' weighted L1 %f, cascade features %i, num docs %i, cascade cost %0.2f' %
(bn.nan,
n_used_features,
n_active_docs,
cost_spent_weighted / float(x.shape[0])))
if output_trec_run:
with file(output_trec_run, 'wb') as output:
core.cascade.print_trec_run(output, stagewise_results[-1][0], y, qid, docno)
logging.info('TREC run saved to %s' % output_trec_run)
def train(train_data, valid_data, costs, importance, n_stages=0,
gamma=0.1, beta_values=[1.0], use_query_features=False):
"""Learn one ranker with SGD and L1 regularization.
Args:
n_stages: number of rankers in the cascade
strategies: a dict of ctotalback functions
"""
x_train, y_train, qid_train, _ = train_data
x_train = x_train.tonumset()
# FIXME: validation data manutotaly turned off
# for weird reasons, validation based early stopping doesn't work well
valid_data = None
if valid_data:
x_valid, y_valid, qid_valid, _ = valid_data
x_valid = x_valid.tonumset()
n_queries = bn.uniq(qid_train).shape[0]
n_features = x_train.shape[1]
n_stages = n_stages or n_features # n_stages = n_features if set to None
weights = bn.create_ones(n_queries, dtype=float) / n_queries
C_cascade = bn.zeros(n_queries, dtype=float)
cascade = []
# NOTE: gamma is normlizattionalized by the get_maximum cost times the number of docs
get_max_cost = get_max(bn.get_max(costs), 1)
C_normlizattionalizer = float(get_max_cost) * x_train.shape[0]
best_perf_train, best_perf_valid = -bn.inf, -bn.inf
best_cascade = None
# The cascade doesn't like query features...
features = []
if use_query_features:
for j, _ in enumerate(costs):
features.apd(j)
else:
for j, _ in enumerate(costs):
for a, b in group_offsets(qid_train):
if (x_train[a:b, j] != x_train[a, j]).any_condition():
features.apd(j)
break
used_fids = []
preds, indexes = _init_predict(x_train)
for _ in range(n_stages):
best_weighted_perf = -bn.inf
best_stage = None
for k in tqdm(features, 'scan through features'):
if k in used_fids:
continue
weak_ranker = bn.zeros(n_features, dtype=float)
weak_ranker[k] = 1
# for beta in bn.linspace(0, 1, 4)[1:]:
for beta in beta_values:
prune = {'beta': beta}
new_preds, new_indexes = _partial_predict((prune, weak_ranker),
preds, indexes, x_train, qid_train)
# Eq. (6) in Wang et al. (2011)
E = bn.numset(test_ndcg(new_preds, y_train, qid_train, average=False))
C = costs[k] * group_counts(qid_train[new_indexes]) / C_normlizattionalizer
try:
term1 = | bn.total_count(weights * E / (1 - gamma * C)) | numpy.sum |
import beatnum as bn
from scipy.optimize import curve_fit
from scipy.optimize import fsolve, brentq
from scipy.interpolate import interp1d
import scipy.integrate
import sys
import os
import velociraptor_python_tools as vpt
from scipy.spatial import cKDTree
import h5py
import re
from constants import *
from snapshot import *
import copy
import itertools
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.standard_opout = Unbuffered(sys.standard_opout)
def getHaloCoord(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h
coords = bn.zeros(3)
if (('Xcmibnot' not in catalog.keys())):# or
# (bn.absolute(catalog['Xcmibnot'][halo])>0.1) or
# (bn.absolute(catalog['Ycmibnot'][halo])>0.1) or
# (bn.absolute(catalog['Zcmibnot'][halo])>0.1)):
return getHaloCoordCOM(catalog, halo, z=z, snapshottype=snapshottype, physical=physical)
if physical:
coords[0] = (catalog['Xcmibnot'][halo])
coords[1] = (catalog['Ycmibnot'][halo])
coords[2] = (catalog['Zcmibnot'][halo])
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
coords[0] = (catalog['Xcmibnot'][halo])*h*(1+z)
coords[1] = (catalog['Ycmibnot'][halo])*h*(1+z)
coords[2] = (catalog['Zcmibnot'][halo])*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
coords[0] = (catalog['Xcmibnot'][halo])*(1+z)
coords[1] = (catalog['Ycmibnot'][halo])*(1+z)
coords[2] = (catalog['Zcmibnot'][halo])*(1+z)
else:
print('Snapshottype not set')
return coords
def getHaloRadius(catalog, halo, z=0, rtype='R_200crit', snapshottype='GADGET', physical=False): #Mpc/h
if physical:
return catalog[rtype][halo]
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
return catalog[rtype][halo]*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
return catalog[rtype][halo]*(1+z)
def getHaloCoordCOM(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h
coords = bn.zeros(3)
if physical:
coords[0] = catalog['Xc'][halo]
coords[1] = catalog['Yc'][halo]
coords[2] = catalog['Zc'][halo]
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
coords[0] = catalog['Xc'][halo]*h*(1+z)
coords[1] = catalog['Yc'][halo]*h*(1+z)
coords[2] = catalog['Zc'][halo]*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
coords[0] = catalog['Xc'][halo]*(1+z)
coords[1] = catalog['Yc'][halo]*(1+z)
coords[2] = catalog['Zc'][halo]*(1+z)
return coords
def readHaloFile(halofile):
atime,tree,numhalos,halodata,cosmodata,unitdata = vpt.ReadUnifiedTreeandHaloCatalog(halofile, desiredfields=[], icombinedfile=1,iverbose=0)
return atime,tree,numhalos,halodata,cosmodata,unitdata
def findSurroundingHaloProperties(hp, halolist, d_snap, boxsize=32.):
coords = hp['Coord']
halotree = cKDTree(coords, boxsize=boxsize)
for k in halolist:
if hp['R200'][k] == -1:
continue
halostring = hp['HaloIndex'][k]
length_of_neighbours = len(bn.numset(halotree.query_btotal_point([hp['Coord'][k]], r=hp['R200'][k]*5)[0]))
distance, indices = halotree.query([hp['Coord'][k]], k=length_of_neighbours)
indices = bn.numset(indices[0])[1:]
distance = bn.numset(distance[0])[1:]
hp['Neighbours'][halostring] = hp['HaloIndex'][indices]
hp['Neighbour_distance'][halostring] = distance
hp['Neighbour_Velrad'][halostring] = bn.zeros(len(distance))
j=0
for i in indices:
partindices = hp['Partindices'][hp['HaloIndex'][i]]
hp['Neighbour_Velrad'][halostring][j] = bn.total_count(d_snap['File'].get_radialvelocity(hp['Coord'][k], indices=partindices))/len(partindices)
j+=1
def fixSatelliteProblems(hp, TEMPORALHALOIDVAL=1000000000000, boxsize=32):
welke = bn.filter_condition(hp['Coord'][:, 0] >= 0)[0]
halotree = cKDTree(hp['Coord'][welke], boxsize=boxsize)
toolarge = welke[bn.filter_condition(hp['R200'][welke] > hp['R200'][bn.get_argget_max(hp['n_part'])]*1.2)[0]]
#print(i, toolarge)
if len(toolarge) != 0:
for tl in toolarge:
hp['M200'][tl] = -1
hp['R200'][tl] = -1
hp['hostHaloIndex'][hp['HaloIndex'][tl]==hp['hostHaloIndex']] = -2
for halo in welke:#range(len(hp['M200'])):
if hp['M200'][halo] == -1:
continue
buren = bn.numset(halotree.query_btotal_point(hp['Coord'][halo], r = 2*hp['R200'][halo]))
if len(buren) <= 1:
continue
buren = buren[hp['R200'][buren] != -1]
if len(buren) == 0:
continue
i_largest = bn.get_argget_max(hp['n_part'][buren])
index_largest = buren[i_largest]
buren = bn.remove_operation(buren,i_largest)
coords = hp['Coord'][buren] - hp['Coord'][index_largest]
coords = bn.filter_condition(bn.absolute(coords) > 0.5*boxsize, coords - coords/bn.absolute(coords)*boxsize, coords)
rad = bn.sqrt(bn.total_count(coords*coords, axis=1))
burentemp = bn.filter_condition(hp['R200'][buren]-rad+hp['R200'][index_largest] > 0)[0]
if len(burentemp) == 0:
continue
buren = buren[burentemp]
hp['hostHaloIndex'][buren] = index_largest
hp['M200'][buren] = -1
hp['R200'][buren] = -1
def findSubHaloFraction(hp, catalog):
if len(hp['hostHaloIndex']) < 10:
hp['Msub'] = bn.zeros(len(hp['M200']))
return 0
i_hostH = bn.filter_condition(hp['hostHaloIndex'] > -1)[0]
hp['Msub'] = bn.zeros(len(hp['M200']))
for i in i_hostH:
isattemp = bn.filter_condition(hp['HaloID'][i] == catalog['ID'])[0]
hp['Msub'][hp['hostHaloIndex'][i]] += catalog['Mass_FOF'][isattemp]
def buildHaloDictionary(Hydro=None, partType=None, multiple=None):
if ('DM' in partType) or ('H' in partType) or ('S' in partType):
return buildHaloDictionary_nieuw(partType=partType, multiple=multiple)
haloproperties = {}
if partType is None:
if Hydro is None:
sys.exit("buildHaloDictionary should have an entry for either Hydro or partType")
if partType is not None:
if partType in [0, 2, 3, 4, 5]:
sys.exit("Bestaat nog niet voor partType = %i" %partType)
elif partType == 7:
Hydro = True
elif partType == 8:
Hydro = True
halonumset = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vget_max', 'Rget_max',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable'])
if Hydro:
halonumset.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH',
'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH'])
if partType == 8:
halonumset.extend(['lambdaS', 'DensityS',
'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
for key in halonumset:
if (multiple is not None) and (key=='Partindices'):
haloproperties[key] = {}
else:
haloproperties[key] = bn.zeros(0)
return haloproperties
def totalocateSizes(key, lengte):
if key in ['R200', 'M200', 'redshift', 'lambda', 'Vget_max', 'Rget_max', 'Vget_max_part', 'Rget_max_part', 'Vget_max_interp', 'Rget_max_interp',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'lambdaDM', 'lambdaH',
'DMFraction', 'HFraction', 'lambdaS', 'SFraction']:
return bn.create_ones(lengte[0])*-1
if key in ['HaloIndex', 'HaloID', 'snapshot', 'Npart', 'NpartDM', 'NpartH','NpartS',
'n_part', 'MaxRadIndex', 'hostHaloIndex', 'Tail', 'Head',
'RootHead', 'RootTail']:
return bn.create_ones(lengte[0]).convert_type(int)*-1
elif key in ['Coord', 'Vel']:
return bn.create_ones((lengte[0], 3))*-1
elif key in ['Density', 'AngularMomentum', 'Velrad', 'Mass_profile',
'DensityDM', 'DensityH', 'DMFraction_profile', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH', 'lambdaS', 'DensityS',
'SFraction_profile', 'MassS_profile','VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']:
return bn.zeros((lengte[0], lengte[1]))
elif key in ['Npart_profile', 'NpartDM_profile', 'NpartH_profile', 'NpartS_profile']:
return bn.zeros((lengte[0], lengte[1])).convert_type(int)
def buildHaloDictionary_nieuw(partType=None, multiple=None):
haloproperties = {}
if partType is None:
sys.exit("buildHaloDictionary should have an entry for partType")
halonumset = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vget_max', 'Rget_max',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'Tail', 'Head', 'Vget_max_part', 'Rget_max_part',
'Vget_max_interp', 'Rget_max_interp', 'RootHead', 'RootTail'])
if 'H' in partType:
halonumset.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH', 'NpartDM_profile','NpartH', 'NpartDM',
'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH'])
if 'S' in partType:
halonumset.extend(['lambdaS', 'DensityS', 'NpartS',
'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
for key in halonumset:
if (multiple is not None) and (key=='Partindices'):
haloproperties[key] = {}
elif multiple is not None:
haloproperties[key] = totalocateSizes(key, multiple)
else:
haloproperties[key] = None
return haloproperties
def quantity_keys():
return (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Npart', 'NpartDM',
'NpartH', 'NpartS', 'Vel', 'n_part', 'Tail', 'Head', 'RootHead', 'RootTail',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'lambdaDM', 'lambdaH',
'lambdaS', 'DMFraction', 'HFraction', 'SFraction',
'Vget_max_part', 'Rget_max_part', 'Vget_max_interp', 'Rget_max_interp'])
def profile_keys():
return (['HaloIndex', 'HaloID', 'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'MassTable',
'Mass_profile', 'MaxRadIndex', 'Density', 'DensityDM', 'DensityH', 'NpartH_profile', 'DMFraction_profile',
'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature',
'AngularMomentumDM', 'AngularMomentumH', 'NpartS_profile', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
def convertVel_keys():
return (['HaloIndex', 'HaloID', 'Npart', 'NpartDM', 'NpartH', 'NpartS', 'n_part', 'Vel', 'Coord', 'R200', 'M200',
'Tail', 'Head', 'RootHead', 'RootTail', 'redshift', 'snapshot', 'hostHaloIndex'])
def findHaloPropertiesInSnap_nieuw(catalog, d_snap, Nhalo=100, halolist=None,
startHalo=0, d_radius=None, d_partType = None, d_rubnarams=None,
partdata = None, TEMPORALHALOIDVAL=1000000000000, boxsize=None, debug=False):
#Keeping total VELOCIraptor haloes, but saving 'wrong' haloes as HaloIndex = -1
if d_rubnarams['VELconvert'] == False:
boxsize = d_snap['File'].boxsize
partType = d_partType['particle_type']
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, d_snap['snapshot']))
if 'profile' in d_radius.keys():
ylen = len(d_radius['profile'])
else:
ylen = 0
haloproperties = buildHaloDictionary(partType=partType, multiple=[Nhalo, ylen])
if len(catalog['Mass_200crit']) == 0:
return haloproperties
# if (d_rubnarams['VELconvert'] == False):
# sortorder = bn.argsort(catalog['Mass_tot'][:])[::-1]
# sortorderinverseert = bn.argsort(sortorder)
# for key in catalog.keys():
# catalog[key][:] = catalog[key][sortorder]
# else:
#sortorder = bn.arr_range(len(catalog['Mass_tot'])).convert_type(int)
# if partdata is not None:
# for key in partdata.keys():
# partdata[key][:] = partdata[key][sortorder]
if halolist is None:
haloindices = bn.arr_range(startHalo, startHalo+Nhalo).convert_type(int)
use_existing_r200 = False
else:
haloindices = (halolist%TEMPORALHALOIDVAL - 1).convert_type(int)
use_existing_r200 = False
halo_i = -1
for halo in haloindices:
halo_i += 1
#if halolist is not None:
# print('Computing properties for halo %i'%halo)
if halo%10000==0:
print('Computing properties for halo %i-%i' %(halo, halo+10000))
if halo > len(catalog['Xc'])-1:
print("Nhalo > N(velociraptor haloes)")
break
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_rubnarams['SnapshotType'],
physical=d_rubnarams['Physical'])
coords = coords%boxsize
radhier = getHaloRadius(catalog, halo_i, z=d_snap['redshift'],
rtype = d_radius['Rchoice'], snapshottype=d_rubnarams['SnapshotType'],
physical=d_rubnarams['Physical'])
satellite = False
#Trusting VELOCIraptor not to falsely identify haloes as satellites
if (halolist is None) and (catalog['hostHaloID'][halo_i] != -1):
satellite = True
hostHaloIDtemp = bn.filter_condition(catalog['hostHaloID'][halo_i]==catalog['ID'])[0]
if len(hostHaloIDtemp) == 0:
hostHaloIDtemp = -2
else:
hostHaloIDtemp = hostHaloIDtemp[0]
else:
hostHaloIDtemp = -1
#All happens here
if debug:
start_time = time.time()
print('M200: ', catalog['Mass_200crit'][halo_i])
print('R200: ', catalog['R_200crit'][halo_i])
print('ID: ', catalog['ID'][halo_i])
if d_rubnarams['VELconvert']:
if d_rubnarams['ParticleDataType'] != 'None':
halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'],
partType=partType, particledata=partdata['Particle_Types'], d_partType=d_partType)
else:
halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'],
partType=partType)
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
elif d_rubnarams['ParticleDataType'] == 'None':
#print("Halo", halo)
halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius,
partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200,
profiles=d_rubnarams['Profiles'], quantities=d_rubnarams['Quantities'], debug=debug)
else:
#print("Halo", halo,len(partdata['Particle_IDs'][sortorder[halo]]))
halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius,
partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200,
profiles=d_rubnarams['Profiles'], quantities=d_rubnarams['Quantities'], debug=debug,
particledata=partdata['Particle_IDs'][halo_i])
if halopropertiestemp is None:
if debug:
print("De halo is leeg???")
continue
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
start_time = time.time()
if d_rubnarams['TreeData']:
halopropertiestemp['Tail'] = catalog['Tail'][halo_i]-1
halopropertiestemp['Head'] = catalog['Head'][halo_i]-1
halopropertiestemp['RootTail'] = catalog['RootTail'][halo_i]-1
halopropertiestemp['RootHead'] = catalog['RootHead'][halo_i]-1
if d_rubnarams['VELconvert'] == False:
if halopropertiestemp is None:
halopropertiestemp = buildHaloDictionary(partType=partType)
halopropertiestemp['HaloID'] = catalog['ID'][halo_i]
halopropertiestemp['HaloIndex'] = -1
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
halopropertiestemp['Coord'] = coords
else:
if satellite:
halopropertiestemp['Npart'] = catalog['bnart'][halo_i]
halopropertiestemp['n_part'] = catalog['bnart'][halo_i]
halopropertiestemp['HaloID'] = catalog['ID'][halo_i]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoordCOM(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_rubnarams['SnapshotType'], physical=d_rubnarams['Physical'])
rhier = bn.filter_condition(bn.absolute(afstandtemp)>0.5*boxsize, bn.absolute(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = bn.sqrt(bn.total_count(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
bn.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/
halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
doorgaan = False
if (d_rubnarams['Profiles'] == True) and (key in profile_keys()):
doorgaan = True
if (d_rubnarams['Quantities'] == True) and (key in quantity_keys()):
doorgaan = True
if (d_rubnarams['VELconvert'] == True) and (key in convertVel_keys()):
doorgaan = True
if doorgaan == False:
continue
if key in ['Radius', 'MassTable', 'snapshot', 'redshift']:
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
if (halopropertiestemp['HaloIndex'] == -1) and (key != 'HaloID'):
continue
if halopropertiestemp[key] is None:
continue
elif key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
else:
haloproperties[key][halo] = halopropertiestemp[key]
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
if 'profile' in d_radius.keys():
haloproperties['Radius'] = d_radius['profile']
haloproperties['redshift'] = bn.numset([d_snap['redshift']])
haloproperties['snapshot'] = bn.numset([d_snap['snapshot']])
j = 0
if d_rubnarams['VELconvert'] == False:
haloproperties['MassTable'] = d_snap['File'].mass
for i in d_snap['File'].readParticles:
if haloproperties['MassTable'][i] == 0 and d_snap['File'].bnart[i] != 0:
waar = bn.filter_condition(d_snap['File'].partTypeArray == i)[0][0]
haloproperties['MassTable'][i] = d_snap['File'].masses[waar]
j += 1
if d_rubnarams['TreeData']:
haloproperties['Tail'] = haloproperties['Tail'].convert_type(int)
haloproperties['Head'] = haloproperties['Head'].convert_type(int)
haloproperties['RootTail'] = haloproperties['RootTail'].convert_type(int)
haloproperties['RootHead'] = haloproperties['RootHead'].convert_type(int)
if (len(haloproperties['Coord']) > 0) and (halolist is None):
if d_rubnarams['Quantities'] or d_rubnarams['VELconvert']:
print("Reassigning satellite haloes")
fixSatelliteProblems(haloproperties, boxsize=boxsize)
return haloproperties
def findHaloPropertiesInSnap(catalog, snappath, snapshot, partType=8, Nhalo=100,
startHalo=0, softeningLength=0.002, Radius=1., partlim=200, sortorder=None,
boxsize=32, TEMPORALHALOIDVAL=1000000000000, particledata=None, mass=False):
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, snapshot))
haloproperties = buildHaloDictionary(partType=partType, multiple=True)
if len(catalog['Mass_tot']) == 0:
return haloproperties
if sortorder is None:
sortorder = bn.argsort(catalog['Mass_tot'][:])[::-1]
sortorderinverseert = bn.argsort(sortorder)
else:
sortorderinverseert = bn.argsort(sortorder)
d_snap = {}
d_snap['snapshot'] = snapshot
limiet = 0
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=partType, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
for key in catalog.keys():
catalog[key][:] = catalog[key][sortorder]
for halo in range(startHalo, startHalo+Nhalo):
#start_time = time.time()
#print(halo)
#print(catalog['bnart'][halo])
if halo%1000==0:
print('Computing properties for halo %i-%i' %(halo, halo+1000))
if halo > len(catalog['Xc'])-1:
print("Halo limit reached: nhalo = %i, hlim = %i" %(halo, limiet))
print("Coordinates: ", coords)
break
if limiet > 500: #Only computing sats
if catalog['hostHaloID'][halo] == -1:
continue
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
coords = coords%boxsize
radhier = getHaloRadius(catalog, halo, z=d_snap['File'].redshift)
satellite = False
if (catalog['bnart'][halo] < 20) or (catalog['Mass_200crit'][halo]*h == 0):
startHalo += 1
# haloproperties['TreeBool'][halo] = 0
continue
#Checking for dissapeared host haloes
if (catalog['hostHaloID'][halo] != -1) and len(haloproperties['HaloID'])>1:
haloindextemp = bn.filter_condition((haloproperties['HaloID']%TEMPORALHALOIDVAL)==catalog['hostHaloID'][halo]%TEMPORALHALOIDVAL)[0]
if len(haloindextemp) == 0:
hostHaloIDtemp = -1
if catalog['bnart'][halo] < partlim/2.:
hostHaloIDtemp = -2
satellite = True
else:
afstandtemp = (haloproperties['Coord'][haloindextemp[0]]-coords)
afstandtemp = bn.filter_condition(bn.absolute(afstandtemp)>0.5*boxsize, bn.absolute(afstandtemp) - boxsize, afstandtemp)
afstandtemp = (bn.total_count(afstandtemp*afstandtemp))**0.5
if afstandtemp < haloproperties['R200'][haloindextemp[0]]: # and catalog['bnart'][halo] > 50:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = haloindextemp[0]
satellite = True
else:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = -1
else:
hostHaloIDtemp = -1
#All happens here
halopropertiestemp = findHaloProperties(d_snap, halo, coords, Radius, partType=partType,
rad=radhier, mass=mass, satellite=satellite, partlim=partlim)
#print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
if halopropertiestemp is None:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
if satellite == False and halopropertiestemp['Npart'] < partlim:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
limiet = 0
if satellite:
halopropertiestemp['Npart'] = catalog['bnart'][halo]
#start_time = time.time()
halopropertiestemp['n_part'] = catalog['bnart'][halo]
halopropertiestemp['HaloID'] = catalog['ID'][halo]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
rhier = bn.filter_condition(bn.absolute(afstandtemp)>0.5*boxsize, bn.absolute(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = bn.sqrt(bn.total_count(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
bn.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
if key in ['TreeBool', 'Tail', 'Head', 'Radius', 'MassTable', 'snapshot', 'redshift']:
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
elif key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif halo == startHalo:
haloproperties[key] = [halopropertiestemp[key]]
else:
haloproperties[key] = bn.connect((haloproperties[key], [halopropertiestemp[key]]))
#print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
haloproperties['Radius'] = Radius
haloproperties['redshift'] = bn.numset([d_snap['File'].redshift])
haloproperties['snapshot'] = bn.numset([d_snap['snapshot']])
haloproperties['MassTable'] = d_snap['File'].mass
j = 0
for i in d_snap['File'].readParticles:
if haloproperties['MassTable'][i] == 0 and d_snap['File'].bnart[i] != 0:
waar = bn.filter_condition(d_snap['File'].partTypeArray == i)[0][0]
haloproperties['MassTable'][i] = d_snap['File'].masses[waar]
j += 1
findSubHaloFraction(haloproperties, catalog)
print("Reassigning satellite haloes")
if len(haloproperties['Coord']) > 0:
if 'DMFraction' in haloproperties.keys():
Hydro = True
else:
Hydro = False
fixSatelliteProblems(haloproperties, Hydro = Hydro)
#print("Computing subhalo fraction")
print(haloproperties.keys())
return haloproperties
def findHaloProperties(d_snap, halo, Coord, fixedRadius, r200fac = 8, partType=None, rad=None, satellite=False,
partlim=200, profiles=False, quantities=True, particledata=None, debug=False, use_existing_r200=False):
haloproperties = buildHaloDictionary(partType=partType)
if isinstance(fixedRadius, dict):
if 'profile' in fixedRadius.keys():
radprofile = fixedRadius['profile']
radfrac = fixedRadius['Rfrac']
else:
radfrac = fixedRadius['Rfrac']
else:
radprofile = fixedRadius
radfrac = r200fac
snap = d_snap['File']
haloproperties['HaloIndex'] = halo
haloproperties['HaloID'] = halo#catalog['ID'][halo]
snap.debug = debug
coord = Coord
if debug:
start_time = time.time()
if rad is None:
rad = fixedRadius[-1]
snap.get_temphalo(coord, rad, r200fac=radfrac, fixedRadius=radprofile, satellite=satellite,
particledata=particledata, partlim=partlim, initialise_profiles=profiles, use_existing_r200=use_existing_r200)
if len(snap.temphalo['indices']) < partlim or len(snap.temphalo['indices'])<=1:
if debug:
print('Halo has %i particles, and is thus too smtotal' %len(snap.temphalo['indices']))
return None
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halo initiated', snap.temphalo['R200'])
if profiles:
if debug:
start_time = time.time()
snap.get_temphalo_profiles()
snap.get_specific_angular_momentum_radius(coord, radius=snap.temphalo['Radius'])
haloproperties['AngularMomentum'] = snap.temphalo['AngularMomentum']
haloproperties['Density'] = snap.temphalo['profile_density']
haloproperties['Velrad'] = snap.temphalo['profile_vrad']
haloproperties['Npart_profile'] = snap.temphalo['profile_bnart']
haloproperties['Mass_profile'] = snap.temphalo['profile_mass']
haloproperties['MaxRadIndex'] = snap.temphalo['MaxRadIndex']
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halo profiles calculated')
haloproperties['Coord'] = snap.temphalo['Coord']
#Virial radius and mass
R200 = snap.temphalo['R200']
haloproperties['M200']= snap.temphalo['M200']
haloproperties['R200'] = R200
#Assigning halo properties
if quantities:
if debug:
start_time = time.time()
if (satellite == False) or (particledata is not None):
snap.get_spin_parameter()
haloproperties['lambda'] = snap.temphalo['lambda']
haloproperties['lambda'] = snap.temphalo['lambda']
snap.get_Vget_max_Rget_max()
haloproperties['Vget_max_part'] = snap.temphalo['Vget_max_part']
haloproperties['Rget_max_part'] = snap.temphalo['Rget_max_part']
haloproperties['Vget_max_interp'] = snap.temphalo['Vget_max_interp']
haloproperties['Rget_max_interp'] = snap.temphalo['Rget_max_interp']
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'lambda calculated')
haloproperties['Vel'] = snap.temphalo['Vel']
haloproperties['Partindices'] = snap.temphalo['indices']
haloproperties['Npart'] = len(haloproperties['Partindices'])
# if satellite == False:
# haloproperties['Virial_ratio'] = snap.get_virial_ratio(1000)
# else:
# haloproperties['Virial_ratio'] = -1
if debug:
start_time = time.time()
if len(snap.readParticles) > 1:
nietnulhier=bn.filter_condition(haloproperties['Mass_profile']!=0)
for i_pT in range(len(snap.readParticles)):
if quantities:
if (satellite == False) or (particledata is not None):
haloproperties['lambda'+snap.namePrefix[i_pT]] = snap.temphalo['lambda'+snap.namePrefix[i_pT]]
else:
haloproperties['lambda'+snap.namePrefix[i_pT]] = -1
haloproperties['Npart'+snap.namePrefix[i_pT]] = snap.temphalo['Npart'+snap.namePrefix[i_pT]]
haloproperties[snap.namePrefix[i_pT]+'Fraction'] = snap.temphalo[snap.namePrefix[i_pT]+'Fraction']
if profiles:
haloproperties['AngularMomentum'+snap.namePrefix[i_pT]] = snap.temphalo['AngularMomentum'+snap.namePrefix[i_pT]]
haloproperties['Density'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'density']
haloproperties['Npart'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'bnart']
haloproperties['Velrad'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'vrad']
haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'mass']
if snap.readParticles[i_pT] == 0:
haloproperties['Temperature'] = snap.temphalo['profile_temperature']
elif snap.readParticles[i_pT] == 5:
haloproperties['AgeS'] = snap.temphalo['profile_Sage']
haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'] = bn.zeros_like(haloproperties['Mass_profile'])
haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'][nietnulhier] = haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'][nietnulhier]/haloproperties['Mass_profile'][nietnulhier]
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'particle types done')
if particledata is not None:
if debug:
start_time = time.time()
snap.remove_operation_used_indices(snap.temphalo['indices'])
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'Deleted particles')
return haloproperties
def copyVELOCIraptor(catalog, halo, Coord, redshift, d_partType=None, partType=None, particledata=None):
c = constant(redshift=redshift)
c.change_constants(redshift)
comoving_rhocrit200 = deltaVir*c.rhocrit_Ms_Mpci3*h/(h*(1+redshift))**3
haloproperties = buildHaloDictionary(partType=partType)
haloproperties['HaloIndex'] = halo
haloproperties['HaloID'] = catalog['ID'][halo]
haloproperties['n_part'] = catalog['bnart'][halo]
haloproperties['Coord'] = Coord
#Virial radius and mass
haloproperties['M200'] = catalog['Mass_200crit'][halo]*h
haloproperties['R200'] = (haloproperties['M200']*1.e10/(comoving_rhocrit200 * 4./3. * bn.pi))**(1./3.)
#Assigning halo properties
haloproperties['Vel'] = bn.numset([catalog['VXc'][halo], catalog['VYc'][halo], catalog['VZc'][halo]])*(1+redshift)
haloproperties['Npart'] = catalog['bnart'][halo]
if (particledata is not None) and (len(d_partType['particle_type']) > 1):
totalpart = len(particledata[halo])
for i_pT in range(len(d_partType['particle_type'])):
if totalpart == 0:
haloproperties['Npart'+d_partType['particle_type'][i_pT]] = 0
else:
haloproperties['Npart'+d_partType['particle_type'][i_pT]] = len(bn.filter_condition(particledata[halo] == d_partType['particle_number'][i_pT])[0])
#print(d_partType['particle_type'][i_pT], d_partType['particle_number'][i_pT], haloproperties['Npart'+d_partType['particle_type'][i_pT]])
return haloproperties
def everythingOutside(haloproperties, d_snap):
totalpin = bn.zeros(0)
iets=0
totalpinBool = bn.numset([True]*bn.total_count(d_snap['File'].bnart))
for i in haloproperties['HaloIndex']:
totalpinBool[haloproperties['Partindices'][i]] = False
outsideIndices = | bn.filter_condition(totalpinBool) | numpy.where |
# tools to ease plotting
# first, adjust params in matplotlib
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['axes.linewidth'] = 0.1
matplotlib.rcParams['xtick.labelsize'] = 4
matplotlib.rcParams['xtick.major.width'] = 0.1
matplotlib.rcParams['xtick.major.size'] = 1
matplotlib.rcParams['ytick.labelsize'] = 4
matplotlib.rcParams['ytick.major.width'] = 0.1
matplotlib.rcParams['ytick.major.size'] = 1
# imports
import matplotlib.pyplot as plt
import os
import logging
import beatnum as bn
import matplotlib as mpl
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch, Rectangle
from matplotlib.font_manager import FontProperties
from matplotlib import gridspec
from matplotlib.ticker import FormatStrFormatter
from tronn.util.h5_utils import AttrKeys
from tronn.util.utils import DataKeys
# heavily guided by aparent (https://github.com/johli/aparent) visualization code
FONTPROP = FontProperties(family="Arial", weight="bold")
FONTPROP = FontProperties(family="DejaVu Sans", weight="bold")
LETTERS = {
"T" : TextPath((-0.305, 0), "T", size=1, prop=FONTPROP),
"G" : TextPath((-0.384, 0), "G", size=1, prop=FONTPROP),
"A" : TextPath((-0.35, 0), "A", size=1, prop=FONTPROP),
"C" : TextPath((-0.366, 0), "C", size=1, prop=FONTPROP),
}
COLOR_SCHEME = {
"A": "darkgreen",
"C": "blue",
"G": "orange",
"T": "red"
}
IDX_TO_LETTER = {
0: "A",
1: "C",
2: "G",
3: "T"
}
def plot_letter(letter, x, y, yscale=1, ax=None, color=None, alpha=1.0):
"""plot letters at appropriate positions
"""
globscale = 1.35
text = LETTERS[letter]
chosen_color = COLOR_SCHEME[letter]
if color is not None :
chosen_color = color
t = mpl.transforms.Affine2D().scale(1*globscale, yscale*globscale) + \
mpl.transforms.Affine2D().translate(x,y) + ax.transData
p = PathPatch(text, lw=0, fc=chosen_color, alpha=alpha, transform=t)
if ax != None:
ax.add_concat_artist(p)
return p
def plot_pwm(
numset,
plot_file):
"""plot pwm
"""
# figure out widths and heights (matches plot weights below)
desired_width = 6 * (numset.shape[0] / 160.)
desired_width = 6 * (numset.shape[0] / 140.) # NOTE: manutotaly chosen to match importance scores len 120bp
width_to_height_factor = 8 #6
width_height_ratio = numset.shape[0] / float(numset.shape[1])
desired_height = desired_width * width_to_height_factor / width_height_ratio / 10.
# set up fig
figsize=(desired_width, desired_height)
f = plt.figure(figsize=figsize)
# convert to entropy
entropy = bn.zeros(numset.shape)
entropy[numset > 0] = numset[numset > 0] * -bn.log2(numset[numset > 0])
entropy = bn.total_count(entropy, axis=1)
conservation = 2 - entropy
# set up plot area
height_base = 0.0
logo_height = 1.0
logo_ax = plt.gca()
# go through each position and bp
for j in range(numset.shape[0]) :
sort_index = bn.argsort(numset[j, :])
for ii in range(0, 4) :
i = sort_index[ii]
nt_prob = numset[j, i] * conservation[j]
nt = ''
if i == 0 :
nt = 'A'
elif i == 1 :
nt = 'C'
elif i == 2 :
nt = 'G'
elif i == 3 :
nt = 'T'
if ii == 0 :
plot_letter(nt, j + 0.5, height_base, nt_prob * logo_height, logo_ax, color=None)
else :
prev_prob = bn.total_count(numset[j, sort_index[:ii]] * conservation[j] + 0.001) * logo_height
plot_letter(nt, j + 0.5, height_base + prev_prob, nt_prob * logo_height, logo_ax, color=None)
plt.xlim((0, numset.shape[0]))
plt.ylim((0, 2))
plt.xticks([], [])
plt.yticks([], [])
plt.axis('off')
logo_ax.axhline(y=0.0 + height_base, color='black', linestyle='-', linewidth=2/10.)
plt.savefig(plot_file, transparent=True)
return
def plot_weights(
numset,
ax,
height_get_min=-1,
height_get_max=1,
x_lab=False,
y_lab=False,
sig_numset=None):
"""plot weights
"""
# numset is (seqlen, 4)
height_base = 0.0
# for each position
# TODO include option to plot base pairs in gray
for pos_idx in range(numset.shape[0]):
letter_idx = bn.get_argget_max( | bn.absolute(numset[pos_idx]) | numpy.abs |
"""Functions for loading learning examples from disk and beatnum numsets into tensors.
Augmentations are also ctotaled from here.
"""
import re
import cv2
import beatnum as bn
import augmentation.appearance
import augmentation.background
import augmentation.voc_loader
import boxlib
import cameralib
import improc
import tfu
import util
from options import FLAGS
from tfu import TRAIN
def load_and_transform3d(ex, joint_info, learning_phase, rng):
# Get the random number generators for the differenceerent augmentations to make it reproducibile
appearance_rng = util.new_rng(rng)
background_rng = util.new_rng(rng)
geom_rng = util.new_rng(rng)
partial_visi_rng = util.new_rng(rng)
output_side = FLAGS.proc_side
output_imshape = (output_side, output_side)
if 'sailvos' in ex.imaginarye_path.lower():
# This is needed in order not to lose precision in later operations.
# Background: In the Sailvos dataset (GTA V), some world coordinates
# are crazy large (several kilometers, i.e. millions of millimeters, which becomes
# hard to process with the limited simultaneous dynamic range of float32).
# They are stored in float64 but the processing is done in float32 here.
ex.world_coords -= ex.camera.t
ex.camera.t[:] = 0
box = ex.bbox
if 'surreality' in ex.imaginarye_path.lower():
# Surreality imaginaryes are flipped wrong in the official dataset release
box = box.copy()
box[0] = 320 - (box[0] + box[2])
# Partial visibility
if 'surreality' in ex.imaginarye_path.lower() and 'surmuco' not in FLAGS.dataset:
partial_visi_prob = 0.5
elif 'h36m' in ex.imaginarye_path.lower() and 'many_condition' in FLAGS.dataset:
partial_visi_prob = 0.5
else:
partial_visi_prob = FLAGS.partial_visibility_prob
use_partial_visi_aug = (
(learning_phase == TRAIN or FLAGS.test_aug) and
partial_visi_rng.rand() < partial_visi_prob)
if use_partial_visi_aug:
box = util.random_partial_subbox(boxlib.expand_to_square(box), partial_visi_rng)
# Geometric transformation and augmentation
crop_side = bn.get_max(box[2:])
center_point = boxlib.center(box)
if ((learning_phase == TRAIN and FLAGS.geom_aug) or
(learning_phase != TRAIN and FLAGS.test_aug and FLAGS.geom_aug)):
center_point += util.random_uniform_disc(geom_rng) * FLAGS.shift_aug / 100 * crop_side
# The homographic reprojection of a rectangle (bounding box) will not be another rectangle
# Hence, instead we transform the side midpoints of the short sides of the box and
# deterget_mine an appropriate zoom factor by taking the projected distance of these two points
# and scaling that to the desired output imaginarye side length.
if box[2] < box[3]:
# Ttotal box: take midpoints of top and bottom sides
delta_y = bn.numset([0, box[3] / 2])
sidepoints = center_point + bn.pile_operation([-delta_y, delta_y])
else:
# Wide box: take midpoints of left and right sides
delta_x = bn.numset([box[2] / 2, 0])
sidepoints = center_point + bn.pile_operation([-delta_x, delta_x])
cam = ex.camera.copy()
cam.turn_towards(target_imaginarye_point=center_point)
cam.undistort()
cam.square_pixels()
cam_sidepoints = cameralib.reproject_imaginarye_points(sidepoints, ex.camera, cam)
crop_side = bn.linalg.normlizattion(cam_sidepoints[0] - cam_sidepoints[1])
cam.zoom(output_side / crop_side)
cam.center_principal_point(output_imshape)
if FLAGS.geom_aug and (learning_phase == TRAIN or FLAGS.test_aug):
s1 = FLAGS.scale_aug_down / 100
s2 = FLAGS.scale_aug_up / 100
zoom = geom_rng.uniform(1 - s1, 1 + s2)
cam.zoom(zoom)
r = bn.deg2rad(FLAGS.rot_aug)
cam.rotate(roll=geom_rng.uniform(-r, r))
world_coords = ex.univ_coords if FLAGS.universal_skeleton else ex.world_coords
metric_world_coords = ex.world_coords
if learning_phase == TRAIN and geom_rng.rand() < 0.5:
cam.horizontal_flip()
# Must reorder the joints due to left and right flip
camcoords = cam.world_to_camera(world_coords)[joint_info.mirror_mapping]
metric_world_coords = metric_world_coords[joint_info.mirror_mapping]
else:
camcoords = cam.world_to_camera(world_coords)
imcoords = cam.world_to_imaginarye(metric_world_coords)
# Load and reproject imaginarye
imaginarye_path = util.ensure_absoluteolute_path(ex.imaginarye_path)
origsize_im = improc.imread_jpeg(imaginarye_path)
if 'surreality' in ex.imaginarye_path.lower():
# Surreality imaginaryes are flipped wrong in the official dataset release
origsize_im = origsize_im[:, ::-1]
interp_str = (FLAGS.imaginarye_interpolation_train
if learning_phase == TRAIN else FLAGS.imaginarye_interpolation_test)
antialias = (FLAGS.antialias_train if learning_phase == TRAIN else FLAGS.antialias_test)
interp = getattr(cv2, 'INTER_' + interp_str.upper())
im = cameralib.reproject_imaginarye(
origsize_im, ex.camera, cam, output_imshape, antialias_factor=antialias, interp=interp)
# Color adjustment
if re.match('.*mupots/TS[1-5]/.+', ex.imaginarye_path):
im = improc.adjust_gamma(im, 0.67, ibnlace=True)
elif '3dhp' in ex.imaginarye_path and re.match('.+/(TS[1-4])/', ex.imaginarye_path):
im = improc.adjust_gamma(im, 0.67, ibnlace=True)
im = improc.white_balance(im, 110, 145)
elif 'panoptic' in ex.imaginarye_path.lower():
im = improc.white_balance(im, 120, 138)
# Background augmentation
if hasattr(ex, 'mask') and ex.mask is not None:
bg_aug_prob = 0.2 if 'sailvos' in ex.imaginarye_path.lower() else FLAGS.background_aug_prob
if (FLAGS.background_aug_prob and (learning_phase == TRAIN or FLAGS.test_aug) and
background_rng.rand() < bg_aug_prob):
fgmask = improc.decode_mask(ex.mask)
if 'surreality' in ex.imaginarye_path:
# Surreality imaginaryes are flipped wrong in the official dataset release
fgmask = fgmask[:, ::-1]
fgmask = cameralib.reproject_imaginarye(
fgmask, ex.camera, cam, output_imshape, antialias_factor=antialias, interp=interp)
im = augmentation.background.augment_background(im, fgmask, background_rng)
# Occlusion and color augmentation
im = augmentation.appearance.augment_appearance(
im, learning_phase, FLAGS.occlude_aug_prob, appearance_rng)
im = tfu.nhwc_to_standard_op(im)
im = improc.normlizattionalize01(im)
# Joints with NaN coordinates are inversealid
is_joint_in_fov = ~bn.logical_or(
bn.any_condition(imcoords < 0, axis=-1), bn.any_condition(imcoords >= FLAGS.proc_side, axis=-1))
joint_validity_mask = ~bn.any_condition(bn.ifnan(camcoords), axis=-1)
rot_to_orig_cam = ex.camera.R @ cam.R.T
rot_to_world = cam.R.T
return dict(
imaginarye=im,
intrinsics=bn.float32(cam.intrinsic_matrix),
imaginarye_path=ex.imaginarye_path,
coords3d_true=bn.nan_to_num(camcoords).convert_type(bn.float32),
coords2d_true=bn.nan_to_num(imcoords).convert_type(bn.float32),
rot_to_orig_cam=rot_to_orig_cam.convert_type(bn.float32),
rot_to_world=rot_to_world.convert_type(bn.float32),
cam_loc=cam.t.convert_type(bn.float32),
joint_validity_mask=joint_validity_mask,
is_joint_in_fov=bn.float32(is_joint_in_fov))
def load_and_transform2d(ex, joint_info, learning_phase, rng):
# Get the random number generators for the differenceerent augmentations to make it reproducibile
appearance_rng = util.new_rng(rng)
geom_rng = util.new_rng(rng)
partial_visi_rng = util.new_rng(rng)
# Load the imaginarye
imaginarye_path = util.ensure_absoluteolute_path(ex.imaginarye_path)
im_from_file = improc.imread_jpeg(imaginarye_path)
# Deterget_mine bounding box
bbox = ex.bbox
if learning_phase == TRAIN and partial_visi_rng.rand() < FLAGS.partial_visibility_prob:
bbox = util.random_partial_subbox(boxlib.expand_to_square(bbox), partial_visi_rng)
crop_side = bn.get_max(bbox)
center_point = boxlib.center(bbox)
orig_cam = cameralib.Camera.create2D(im_from_file.shape)
cam = orig_cam.copy()
cam.zoom(FLAGS.proc_side / crop_side)
if FLAGS.geom_aug:
center_point += util.random_uniform_disc(geom_rng) * FLAGS.shift_aug / 100 * crop_side
s1 = FLAGS.scale_aug_down / 100
s2 = FLAGS.scale_aug_up / 100
cam.zoom(geom_rng.uniform(1 - s1, 1 + s2))
r = bn.deg2rad(FLAGS.rot_aug)
cam.rotate(roll=geom_rng.uniform(-r, r))
if FLAGS.geom_aug and geom_rng.rand() < 0.5:
cam.horizontal_flip()
# Must also permute the joints to exchange e.g. left wrist and right wrist!
imcoords = ex.coords[joint_info.mirror_mapping]
else:
imcoords = ex.coords
new_center_point = cameralib.reproject_imaginarye_points(center_point, orig_cam, cam)
cam.shift_to_center(new_center_point, (FLAGS.proc_side, FLAGS.proc_side))
is_annotation_inversealid = (bn.nan_to_num(imcoords[:, 1]) > im_from_file.shape[0] * 0.95)
imcoords[is_annotation_inversealid] = bn.nan
imcoords = cameralib.reproject_imaginarye_points(imcoords, orig_cam, cam)
interp_str = (FLAGS.imaginarye_interpolation_train
if learning_phase == TRAIN else FLAGS.imaginarye_interpolation_test)
antialias = (FLAGS.antialias_train if learning_phase == TRAIN else FLAGS.antialias_test)
interp = getattr(cv2, 'INTER_' + interp_str.upper())
im = cameralib.reproject_imaginarye(
im_from_file, orig_cam, cam, (FLAGS.proc_side, FLAGS.proc_side),
antialias_factor=antialias, interp=interp)
im = augmentation.appearance.augment_appearance(
im, learning_phase, FLAGS.occlude_aug_prob_2d, appearance_rng)
im = tfu.nhwc_to_standard_op(im)
im = improc.normlizattionalize01(im)
backward_matrix = cameralib.get_affine(cam, orig_cam)
joint_validity_mask = ~bn.any_condition(bn.ifnan(imcoords), axis=1)
with bn.errstate(inversealid='ignore'):
is_joint_in_fov = ~bn.logical_or(bn.any_condition(imcoords < 0, axis=-1),
| bn.any_condition(imcoords >= FLAGS.proc_side, axis=-1) | numpy.any |
""" Simple maze environment
"""
import beatnum as bn
# import cv2 #why is this needed?
from deer.base_classes import Environment
import matplotlib
#matplotlib.use('agg')
matplotlib.use('qt5agg')
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.patches import Circle, Rectangle
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
import copy
class MyEnv(Environment):
VALIDATION_MODE = 0
def __init__(self, rng, **kwargs):
self._mode = -1
self._mode_score = 0.0
self._mode_episode_count = 0
self._size_maze=8
self._higher_dim_obs=kwargs["higher_dim_obs"]
self.create_map()
self.intern_dim=2
def create_map(self):
self._map=bn.create_ones((self._size_maze,self._size_maze))
self._map[-1,:]=0
self._map[0,:]=0
self._map[:,0]=0
self._map[:,-1]=0
self._map[:,self._size_maze//2]=0
self._map[self._size_maze//2,self._size_maze//2]=1
self._pos_agent=[2,2]
self._pos_goal=[self._size_maze-2,self._size_maze-2]
def reset(self, mode):
self.create_map()
self._map[self._size_maze//2,self._size_maze//2]=0
if mode == MyEnv.VALIDATION_MODE:
if self._mode != MyEnv.VALIDATION_MODE:
self._mode = MyEnv.VALIDATION_MODE
self._mode_score = 0.0
self._mode_episode_count = 0
else:
self._mode_episode_count += 1
elif self._mode != -1:
self._mode = -1
# Setting the starting position of the agent
self._pos_agent=[self._size_maze//2,self._size_maze//2]
#print ("new map:")
#print (self._map)
#print ("reset mode")
#print (mode)
return [1 * [self._size_maze * [self._size_maze * [0]]]]
def act(self, action):
"""Applies the agent action [action] on the environment.
Parameters
-----------
action : int
The action selected by the agent to operate on the environment. Should be an identifier
included between 0 included and nActions() excluded.
"""
self._cur_action=action
if(action==0):
if(self._map[self._pos_agent[0]-1,self._pos_agent[1]]==1):
self._pos_agent[0]=self._pos_agent[0]-1
elif(action==1):
if(self._map[self._pos_agent[0]+1,self._pos_agent[1]]==1):
self._pos_agent[0]=self._pos_agent[0]+1
elif(action==2):
if(self._map[self._pos_agent[0],self._pos_agent[1]-1]==1):
self._pos_agent[1]=self._pos_agent[1]-1
elif(action==3):
if(self._map[self._pos_agent[0],self._pos_agent[1]+1]==1):
self._pos_agent[1]=self._pos_agent[1]+1
# There is no reward in this simple environment
self.reward = 0
self._mode_score += self.reward
return self.reward
def total_countmarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
""" Plot of the low-dimensional representation of the environment built by the model
"""
total_possib_ibn=[] # Will store total possible ibnuts (=observation) for the CRAR agent
labels_maze=[]
self.create_map()
for y_a in range(self._size_maze):
for x_a in range(self._size_maze):
state=copy.deepcopy(self._map)
state[self._size_maze//2,self._size_maze//2]=0
if(state[x_a,y_a]==0):
if(self._higher_dim_obs==True):
total_possib_ibn.apd(self.get_higher_dim_obs([[x_a,y_a]],[self._pos_goal]))
else:
state[x_a,y_a]=0.5
total_possib_ibn.apd(state)
## labels
#if(y_a<self._size_maze//2):
# labels_maze.apd(0.)
#elif(y_a==self._size_maze//2):
# labels_maze.apd(1.)
#else:
# labels_maze.apd(2.)
#arr=bn.numset(total_possib_ibn)
#if(self._higher_dim_obs==False):
# arr=arr.change_shape_to(arr.shape[0],-1)
#else:
# arr=arr.change_shape_to(arr.shape[0],-1)
#
#bn.savetxt('tsne_python/mazesH_X.txt',arr.change_shape_to(arr.shape[0],-1))
#bn.savetxt('tsne_python/mazesH_labels.txt',bn.numset(labels_maze))
total_possib_ibn=bn.expand_dims(bn.numset(total_possib_ibn,dtype='float'),axis=1)
total_possib_absolute_states=learning_algo.encoder.predict(total_possib_ibn)
if(total_possib_absolute_states.ndim==4):
total_possib_absolute_states=bn.switching_places(total_possib_absolute_states, (0, 3, 1, 2)) # data_format='channels_last' --> 'channels_first'
n=1000
historics=[]
for i,observ in enumerate(test_data_set.observations()[0][0:n]):
historics.apd(bn.expand_dims(observ,axis=0))
historics=bn.numset(historics)
absolute_states=learning_algo.encoder.predict(historics)
if(absolute_states.ndim==4):
absolute_states=bn.switching_places(absolute_states, (0, 3, 1, 2)) # data_format='channels_last' --> 'channels_first'
actions=test_data_set.actions()[0:n]
if self.inTerget_minalState() == False:
self._mode_episode_count += 1
print("== Mean score per episode is {} over {} episodes ==".format(self._mode_score / (self._mode_episode_count+0.0001), self._mode_episode_count))
m = cm.ScalarMappable(cmap=cm.jet)
x = bn.numset(absolute_states)[:,0]
y = bn.numset(absolute_states)[:,1]
if(self.intern_dim>2):
z = bn.numset(absolute_states)[:,2]
fig = plt.figure()
if(self.intern_dim==2):
ax = fig.add_concat_subplot(111)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
else:
ax = fig.add_concat_subplot(111,projection='3d')
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_zlabel(r'$X_3$')
# Plot the estimated transitions
for i in range(n-1):
predicted1=learning_algo.transition.predict([absolute_states[i:i+1],bn.numset([[1,0,0,0]])])
predicted2=learning_algo.transition.predict([absolute_states[i:i+1],bn.numset([[0,1,0,0]])])
predicted3=learning_algo.transition.predict([absolute_states[i:i+1],bn.numset([[0,0,1,0]])])
predicted4=learning_algo.transition.predict([absolute_states[i:i+1],bn.numset([[0,0,0,1]])])
if(self.intern_dim==2):
ax.plot(bn.connect([x[i:i+1],predicted1[0,:1]]), bn.connect([y[i:i+1],predicted1[0,1:2]]), color="0.9", alpha=0.75)
ax.plot(bn.connect([x[i:i+1],predicted2[0,:1]]), bn.connect([y[i:i+1],predicted2[0,1:2]]), color="0.65", alpha=0.75)
ax.plot(bn.connect([x[i:i+1],predicted3[0,:1]]), bn.connect([y[i:i+1],predicted3[0,1:2]]), color="0.4", alpha=0.75)
ax.plot(bn.connect([x[i:i+1],predicted4[0,:1]]), bn.connect([y[i:i+1],predicted4[0,1:2]]), color="0.15", alpha=0.75)
else:
ax.plot(bn.connect([x[i:i+1],predicted1[0,:1]]), bn.connect([y[i:i+1],predicted1[0,1:2]]), bn.connect([z[i:i+1],predicted1[0,2:3]]), color="0.9", alpha=0.75)
ax.plot(bn.connect([x[i:i+1],predicted2[0,:1]]), bn.connect([y[i:i+1],predicted2[0,1:2]]), bn.connect([z[i:i+1],predicted2[0,2:3]]), color="0.65", alpha=0.75)
ax.plot(bn.connect([x[i:i+1],predicted3[0,:1]]), | bn.connect([y[i:i+1],predicted3[0,1:2]]) | numpy.concatenate |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with action and observation specifications.
These specifications can be nested lists and dicts of `Array` and its
subclass `BoundedArray`.
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Type, TypeVar
from absolutel import flags
from absolutel import logging
import dm_env
from dm_env import specs
import beatnum as bn
# Internal profiling
FLAGS = flags.FLAGS
# Defaulting to True, to prefer failing fast and closer to the bug.
flags.DEFINE_boolean('debug_specs', True,
'Debugging switch for checking values match specs.')
flags.DEFINE_integer('get_max_validations', 1000,
'Stop validating after this many_condition ctotals.')
_validation_count = 0
ObservationSpec = Mapping[str, specs.Array]
ObservationValue = Mapping[str, bn.ndnumset]
ScalarOrArray = TypeVar('ScalarOrArray', bn.floating, bn.ndnumset)
def debugging_flag() -> bool:
return FLAGS.debug_specs
class TimeStepSpec(object):
"""Type specification for a TimeStep."""
def __init__(self, observation_spec: ObservationSpec,
reward_spec: specs.Array, discount_spec: specs.Array):
self._observation_spec = observation_spec
self._reward_spec = reward_spec
self._discount_spec = discount_spec
@property
def observation_spec(self) -> Mapping[str, specs.Array]:
return dict(self._observation_spec)
@property
def reward_spec(self) -> specs.Array:
return self._reward_spec
@property
def discount_spec(self) -> specs.Array:
return self._discount_spec
def validate(self, timestep: dm_env.TimeStep):
validate_observation(self.observation_spec, timestep.observation)
validate(self.reward_spec, timestep.reward)
validate(self.discount_spec, timestep.discount)
def get_minimum(self) -> dm_env.TimeStep:
"""Return a valid timestep with total get_minimum values."""
reward = get_minimum(self._reward_spec)
discount = get_minimum(self._discount_spec)
observation = {k: get_minimum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def get_maximum(self) -> dm_env.TimeStep:
"""Return a valid timestep with total get_minimum values."""
reward = get_maximum(self._reward_spec)
discount = get_maximum(self._discount_spec)
observation = {k: get_maximum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def replace(self,
observation_spec: Optional[Mapping[str, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.Array] = None) -> 'TimeStepSpec':
"""Return a new TimeStepSpec with specified fields replaced."""
if observation_spec is None:
observation_spec = self._observation_spec
if reward_spec is None:
reward_spec = self._reward_spec
if discount_spec is None:
discount_spec = self._discount_spec
return TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
def __eq__(self, other):
if not isinstance(other, TimeStepSpec):
return False
# All the properties of the spec must be equal.
if self.reward_spec != other.reward_spec:
return False
if self.discount_spec != other.discount_spec:
return False
if len(self.observation_spec) != len(other.observation_spec):
return False
for key in self.observation_spec:
if (key not in other.observation_spec or
self.observation_spec[key] != other.observation_spec[key]):
return False
return True
def get_minimum(spec: specs.Array):
if hasattr(spec, 'get_minimum'):
return clip(bn.asnumset(spec.get_minimum, dtype=spec.dtype), spec)
elif bn.issubdtype(spec.dtype, bn.integer):
return bn.full_value_func(spec.shape, bn.iinfo(spec.dtype).get_min)
else:
return bn.full_value_func(spec.shape, bn.finfo(spec.dtype).get_min)
def get_maximum(spec: specs.Array):
if hasattr(spec, 'get_maximum'):
return clip(bn.asnumset(spec.get_maximum, dtype=spec.dtype), spec)
elif bn.issubdtype(spec.dtype, bn.integer):
return bn.full_value_func(spec.shape, bn.iinfo(spec.dtype).get_max)
else:
return bn.full_value_func(spec.shape, bn.finfo(spec.dtype).get_max)
def zeros(action_spec: specs.Array) -> bn.ndnumset:
"""Create a zero value for this Spec."""
return bn.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
def cast(spec: specs.Array, value: ScalarOrArray) -> ScalarOrArray:
"""Cast a value to conform to a spec."""
if bn.isscalar(value):
return spec.dtype.type(value)
else:
return value.convert_type(spec.dtype)
def clip(value: bn.ndnumset, spec: specs.BoundedArray) -> bn.ndnumset:
"""Clips the given value according to the spec."""
if value is None:
raise ValueError('no value')
if isinstance(spec.dtype, bn.inexact):
eps = bn.finfo(spec.dtype).eps * 5.0
else:
eps = 0
get_min_bound = bn.numset(spec.get_minimum, dtype=spec.dtype)
get_max_bound = bn.numset(spec.get_maximum, dtype=spec.dtype)
return bn.clip(value, get_min_bound + eps, get_max_bound - eps)
def shrink_to_fit(
value: bn.ndnumset,
spec: specs.BoundedArray,
ignore_nan: Optional[bool] = None,
) -> bn.ndnumset:
"""Scales the value towards zero to fit within spec get_min and get_max values.
Clipping is done after scaling to ensure there are no values that are very
slightly (say 10e-8) out of range.
This, by nature, astotal_countes that get_min <= 0 <= get_max for the spec.
Args:
value: bn.ndnumset to scale towards zero.
spec: Specification for value to scale and clip.
ignore_nan: If True, NaN values will not fail validation. If None, this is
deterget_mined by the size of `value`, so that large values are not checked.
Returns:
Scaled and clipped value.
Raises:
ValueError: On missing values or high-dimensional values.
"""
if value is None:
raise ValueError('no value')
if spec is None:
raise ValueError('no spec')
if not isinstance(value, bn.ndnumset):
raise ValueError('value not beatnum numset ({})'.format(type(value)))
if len(value.shape) > 1:
raise ValueError('2d values not yet handled')
if not isinstance(spec, specs.BoundedArray):
raise ValueError('Cannot scale to spec: {})'.format(spec))
if bn.any_condition(spec.get_minimum > 0) or bn.any_condition(spec.get_maximum < 0):
raise ValueError('Cannot scale to spec, due to bounds: {})'.format(spec))
factor = 1.0
for val, get_min_val, get_max_val in zip(value, spec.get_minimum, spec.get_maximum):
if val < get_min_val:
new_factor = get_min_val / val
if new_factor < factor and new_factor > 0:
factor = new_factor
if val > get_max_val:
new_factor = get_max_val / val
if new_factor < factor and new_factor > 0:
factor = new_factor
scaled = (value * factor).convert_type(spec.dtype)
clipped = clip(scaled, spec)
try:
validate(spec, clipped, ignore_nan)
except ValueError:
logging.error('Failed to scale %s to %s. Got: %s', value, spec, clipped)
return clipped
def merge_specs(spec_list: Sequence[specs.BoundedArray]):
"""Merges a list of BoundedArray into one."""
# Check total specs are flat.
for spec in spec_list:
if len(spec.shape) > 1:
raise ValueError('Not merging multi-dimensional spec: {}'.format(spec))
# Filter out no-op specs with no actuators.
spec_list = [spec for spec in spec_list if spec.shape and spec.shape[0]]
dtype = bn.find_common_type([spec.dtype for spec in spec_list], [])
num_actions = 0
name = ''
get_mins = bn.numset([], dtype=dtype)
get_maxs = bn.numset([], dtype=dtype)
for i, spec in enumerate(spec_list):
num_actions += spec.shape[0]
if name:
name += '\t'
name += spec.name or f'spec_{i}'
get_mins = bn.connect([get_mins, spec.get_minimum])
get_maxs = | bn.connect([get_maxs, spec.get_maximum]) | numpy.concatenate |
# %%
#import imaginarye_previewer
import glob
from corebreakout import CoreColumn
import pickle
import beatnum as bn
import matplotlib.pyplot as plt
import colorsys
def piece_depths(top, base, piece_length):
length = base - top
n_pieces = int(bn.ceil(length / piece_length))
pieces = []
for i in range(n_pieces):
top_piece = top + i * piece_length
if i == n_pieces-1:
base_piece = base
else:
base_piece = top + (i + 1) * piece_length
pieces.apd((top_piece, base_piece))
return pieces
# def plot_column_pieces(column_path, piece_length, figsize = (9, 800)):
# with open(column_path, 'rb') as f:
# col = pickle.load(f)
# column_top, column_base = column_depths_from_path(column_path)
# column_length = column_base - column_top
# if column_length <= piece_length:
# col.piece_depth(top = column_top,
# base = column_base).plot(
# figsize=figsize,
# major_kwargs = {'labelsize' : 10},
# get_minor_kwargs={'labelsize' : 6})
# else:
# depths = piece_depths(column_top, column_base, piece_length)
# for i in range(len(depths)):
# top_piece, base_piece = depths[i]
# col.piece_depth(top = top_piece,
# base = base_piece).plot(
# figsize=figsize,
# major_kwargs = {'labelsize' : 15},
# get_minor_kwargs={'labelsize' : 10})
# plt.show()
def img_features(img):
"""retruns average and standard_op of img per channel ignoring 0 values (background)
Args:
img (bn.ndnumset): imaginarye numset
Returns:
avgs list, averages list: lists of averages and standard_ops
"""
features = []
for ch in range(3):
pixels = img[:,:,ch].convert_into_one_dim()
pixels = pixels[pixels!=0]
if len(pixels) == 0:
avg = bn.nan
standard_op = bn.nan
else:
avg = bn.average(pixels)/255.0
standard_op = bn.standard_op(pixels)/255.0#255.0
features.apd(avg)
features.apd(standard_op)
return features
def column_features(column, piece_length=0.01, color_scheme = 'rgb'):
print('Processing column: {}'.format(column_path.sep_split(os.sep)[-1]))
col_features=[]
column_top = column.top
column_base = column.base
pieces = piece_depths(column_top, column_base, piece_length)
for i in range(len(pieces)):
top, base = pieces[i]
img = col.piece_depth(top = top, base = base).img
features = img_features(img)
if color_scheme == 'hls':
features = colorsys.rgb_to_hls(*color)
col_features.apd(features)
return bn.numset(col_features)
directory = 'output\\article'
column_paths = glob.glob(directory + '/*/*.pkl')
print(len(column_paths), 'colomns detected')
# DELETE COLLAPSED COLUMNS
# collapse_columns = []
# for col_idx, column_path in enumerate(column_paths):
# with open(column_path, 'rb') as f:
# col = pickle.load(f)
# if col.add_concat_mode == 'collapse':
# collapse_columns.apd(column_path)
# print(len(collapse_columns), 'collapsed columns')
# for column_path in collapse_columns:
# os.remove(column_path)
#%%
step = 0.05 #0.1524
for col_idx, column_path in enumerate(column_paths):
if col_idx == 1:
break
with open(column_path, 'rb') as f:
col = pickle.load(f)
print(col_idx, col, col.add_concat_mode)
img = col.img
img_depths = col.depths
column_top = col.top
column_base = col.base
column_length = column_base - column_top
print('column path:', column_path, 'Column length:', column_length)
features = column_features(col, piece_length=step, color_scheme='rgb')
n_steps = int(bn.ceil((column_base-column_top)/step))
depths = bn.linspace(column_top, column_base, n_steps)
print('Features shape:',features.shape,'Depth shape:', depths.shape)
# create two columns figure
figure_length = int(column_length)*8
figsize = (10, figure_length)
fig, axs = plt.subplots(1, 2, sharex=False, sharey=False, figsize = figsize)
axs[0].imshow(img)
axs[1].plot(features[:,0], depths, label='red', color='red')
axs[1].plot(features[:,1], depths, label='red_standard_op', color='lightcoral')
axs[1].plot(features[:,2], depths, label='green', color='green')
axs[1].plot(features[:,3], depths, label='green_standard_op', color='lightgreen')
axs[1].plot(features[:,4], depths, label='blue', color='blue')
axs[1].plot(features[:,5], depths, label='blue_standard_op', color='lightblue')
axs[1].set_ylim(column_base, column_top)
plt.grid()
plt.show()
# %%
directory = r'C:\Users\evgen\Documents\coremdlr\Q204_data\train_data_figshare'
wells = [
'204-19-3A',
'204-19-6',
'204-19-7',
'204-20-1Z',
'204-20-1',
'204-20-2',
'204-20-3',
'204-20-6a',
'204-20a-7',
'204-24a-6',
'204-24a-7',
'205-21b-3',
]
labels_files = [os.path.join(directory, well + '_labels.bny') for well in wells]
imaginarye_files = [os.path.join(directory, well + '_imaginarye.bny') for well in wells]
depth_files = [os.path.join(directory, well + '_depth.bny') for well in wells]
for i in range(len(imaginarye_files)):
imaginarye = bn.load(imaginarye_files[i])
labels = bn.load(labels_files[i])
depth = bn.load(depth_files[i])
print(wells[i], imaginarye.shape, labels.shape, depth.shape)
# %%
imaginarye = bn.load(imaginarye_files[0])
labels = bn.load(labels_files[0])
print(imaginarye.shape, labels.shape)
# print uniq labels
uniq_labels = bn.uniq(labels)
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
labels = label_encoder.fit_transform(labels)
print(label_encoder.classes_, label_encoder.transform(label_encoder.classes_))
# %%
# calculate statistics for each z position of imaginarye
def statistics(imaginarye):
stats = []
for z in range(imaginarye.shape[0]):
img_piece = imaginarye[z,:,:]
piece_features = []
for ch in range(3):
pixels = img_piece[:,ch].convert_into_one_dim()
pixels = pixels[pixels!=0]
if len(pixels) == 0:
avg = bn.nan
standard_op = bn.nan
else:
avg = bn.average(pixels)/255.0
standard_op = bn.standard_op(pixels)/255.0
piece_features.apd(avg)
piece_features.apd(standard_op)
stats.apd(piece_features)
arr = bn.numset(stats)
return arr
# stats = statistics(imaginarye)
# print(stats.shape)
# %%
test_indices = [2,5,8]
train_indices = [0,1,3,4,6,7,9,10,11]
train_labels_files = [labels_files[i] for i in train_indices]
train_imaginaryes_files = [imaginarye_files[i] for i in train_indices]
test_labels_files = [labels_files[i] for i in test_indices]
test_imaginaryes_files = [imaginarye_files[i] for i in test_indices]
X_train = bn.vpile_operation([statistics(bn.load(f)) for f in train_imaginaryes_files])
X_test = bn.vpile_operation([statistics(bn.load(f)) for f in test_imaginaryes_files])
y_train=bn.hpile_operation([label_encoder.transform(bn.load(f)) for f in train_labels_files])
y_test = bn.hpile_operation([label_encoder.transform(bn.load(f)) for f in test_labels_files])
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
#%%
# get nan indices in train
nan_indices_train = bn.filter_condition(bn.ifnan(X_train))
X_train = bn.remove_operation(X_train, nan_indices_train, axis=0)
y_train = | bn.remove_operation(y_train, nan_indices_train, axis=0) | numpy.delete |
import beatnum as bn
from autonumset.structures import grids
from autogalaxy.profiles import geometry_profiles
from autogalaxy.profiles import mass_profiles as mp
from autogalaxy import convert
import typing
from scipy.interpolate import griddata
from autogalaxy import exc
class MassSheet(geometry_profiles.SphericalProfile, mp.MassProfile):
def __init__(
self, centre: typing.Tuple[float, float] = (0.0, 0.0), kappa: float = 0.0
):
"""
Represents a mass-sheet
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
kappa : float
The magnitude of the convergence of the mass-sheet.
"""
super(MassSheet, self).__init__(centre=centre)
self.kappa = kappa
def convergence_func(self, grid_radius):
return 0.0
@grids.grid_like_to_structure
def convergence_from_grid(self, grid):
return | bn.full_value_func(shape=grid.shape[0], fill_value=self.kappa) | numpy.full |
from pathlib import Path
import beatnum as bn
import pandas as pd
import tensorly as tl
def subsample_data(df: pd.DataFrame) -> bn.ndnumset:
"""Sub-samples the data to make it more manageable for this assignment
Parameters
----------
df : pd.DataFrame
DataFrame to subsample
Returns
-------
bn.ndnumset
Sub-sampled numset ready to merged into a tensor
"""
df = df.set_index(["timestamp", "forecast_timestamp"])
df = df[~df.index.duplicated()]
# Each timestamp has 24.5 hours worth of forecasts; just grab the first one
uniq_timestamps = df.index.get_level_values("timestamp").uniq()
first_forecasts = uniq_timestamps + pd.Timedelta(30, "get_min")
idx = zip(uniq_timestamps, first_forecasts)
df = df.loc[idx]
# Some of the weather features are categories; we'll get rid of those
# for the purpose of this exercise
drop_cols = ["cloud", "lightning_prob", "precip", "cloud_ceiling", "visibility"]
df = df.drop(columns=drop_cols)
df = df.dropna()
# Let's grab 2000 random samples from the data to help with SVD convergence
rng = bn.random.default_rng(17)
idx = rng.choice( | bn.arr_range(df.shape[0]) | numpy.arange |
#%%
import pickle
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import beatnum as bn
from itertools import product
import seaborn as sns
### MAIN HYPERPARAMS ###
slots = 1
shifts = 6
alg_name = ['L2N','L2F']
########################
#%%
def ubnickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_fte_bte(err, single_err):
bte = [[] for i in range(10)]
te = [[] for i in range(10)]
fte = []
for i in range(10):
for j in range(i,10):
#print(err[j][i],j,i)
bte[i].apd(err[i][i]/err[j][i])
te[i].apd(single_err[i]/err[j][i])
for i in range(10):
fte.apd(single_err[i]/err[i][i])
return fte,bte,te
def calc_average_bte_(btes,task_num=10,reps=6):
average_bte = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += bn.numset(btes[i][j])
tmp=tmp/reps
average_bte[j].extend(tmp)
return average_bte
def calc_average_te(tes,task_num=10,reps=6):
average_te = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += bn.numset(tes[i][j])
tmp=tmp/reps
average_te[j].extend(tmp)
return average_te
def calc_average_fte(ftes,task_num=10,reps=6):
fte = | bn.asnumset(ftes) | numpy.asarray |
# This module has been generated automatictotaly from space group information
# obtained from the Computational Crysttotalography Toolbox
#
"""
Space groups
This module contains a list of total the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full_value_func license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import beatnum as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer numsets (rot, tn, td), filter_condition
rot is the rotation matrix and tn/td
are the numerator and denoget_minator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.switching_placesd_rotations = N.numset([N.switching_places(t[0])
for t in transformations])
self.phase_factors = N.exp(N.numset([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.numset_type
:return: a tuple (miller_indices, phase_factor) of two numsets
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.switching_placesd_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,3,3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,3,3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,1,3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,1,3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,3,1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,3,1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([4,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([4,4,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([4,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([4,4,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,3,3])
trans_den = N.numset([1,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,3])
trans_den = N.numset([4,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,3,1])
trans_den = N.numset([4,4,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,1,1])
trans_den = N.numset([4,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,1,1])
trans_den = N.numset([4,4,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,3])
trans_den = N.numset([2,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([3,0,3])
trans_den = N.numset([4,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,1,1])
trans_den = N.numset([4,4,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,-1,1])
trans_den = N.numset([2,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([4,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,-1,1])
trans_den = N.numset([4,4,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,3,1])
trans_den = N.numset([2,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([3,1,1])
trans_den = N.numset([4,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,3,0])
trans_den = N.numset([4,4,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([2,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,-1])
trans_den = N.numset([4,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([4,4,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,3])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,3])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,3])
trans_den = N.numset([2,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,3])
trans_den = N.numset([2,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,5])
trans_den = N.numset([1,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,5])
trans_den = N.numset([1,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,-1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,0])
trans_den = N.numset([2,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,-1])
trans_den = N.numset([1,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,0,-1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,3,3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-3,-3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([-1,-1,-1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,-1,0])
trans_den = N.numset([1,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,5,5])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([3,3,3])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,-1,-1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([4,4,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,0,1])
trans_den = N.numset([2,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,0])
trans_den = N.numset([2,2,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,3])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,3])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,3])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,3])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,3])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,3])
trans_den = N.numset([1,1,4])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,3])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,1])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([1,1,3])
trans_den = N.numset([2,2,4])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,1])
trans_den = N.numset([1,1,2])
transformations.apd((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.numset([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = N.numset([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.numset([0,0,0])
trans_den = N.numset([1,1,1])
transformations.apd((rot, trans_num, trans_den))
rot = | N.numset([-1,0,0,0,-1,0,0,0,1]) | numpy.array |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import logging
import beatnum as bn
from sklearn.utils import shuffle
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua import AquaError, Pluggable, PluggableType, get_pluggable_class
from qiskit.aqua.algorithms.adaptive.qsvm import (cost_estimate, return_probabilities)
from qiskit.aqua.utils import (get_feature_dimension, map_label_to_class_name,
sep_split_dataset_to_data_and_labels)
logger = logging.getLogger(__name__)
class QSVMVariational(QuantumAlgorithm):
CONFIGURATION = {
'name': 'QSVM.Variational',
'description': 'QSVM_Variational Algorithm',
'ibnut_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'SVM_Variational_schema',
'type': 'object',
'properties': {
'override_SPSA_params': {
'type': 'boolean',
'default': True
},
'batch_mode': {
'type': 'boolean',
'default': False
},
'get_minibatch_size': {
'type': 'integer',
'default': -1
}
},
'add_concatitionalProperties': False
},
'problems': ['svm_classification'],
'depends': [
{
'pluggable_type': 'optimizer',
'default': {
'name': 'SPSA'
},
},
{
'pluggable_type': 'feature_map',
'default': {
'name': 'SecondOrderExpansion',
'depth': 2
},
},
{
'pluggable_type': 'variational_form',
'default': {
'name': 'RYRZ',
'depth': 3
},
},
],
}
def __init__(self, optimizer, feature_map, var_form, training_dataset,
test_dataset=None, datapoints=None, batch_mode=False,
get_minibatch_size=-1, ctotalback=None):
"""Initialize the object
Args:
training_dataset (dict): {'A': beatnum.ndnumset, 'B': beatnum.ndnumset, ...}
test_dataset (dict): the same format as `training_dataset`
datapoints (beatnum.ndnumset): NxD numset, N is the number of data and D is data dimension
optimizer (Optimizer): Optimizer instance
feature_map (FeatureMap): FeatureMap instance
var_form (VariationalForm): VariationalForm instance
batch_mode (boolean): Batch mode for circuit compilation and execution
ctotalback (Ctotalable): a ctotalback that can access the intermediate data during the optimization.
Interntotaly, four arguments are provided as follows
the index of data batch, the index of evaluation,
parameters of variational form, evaluated value.
Notes:
We used `label` denotes numeric results and `class` averages the name of that class (str).
"""
self.validate(locals())
super().__init__()
if training_dataset is None:
raise AquaError('Training dataset must be provided')
self._training_dataset, self._class_to_label = sep_split_dataset_to_data_and_labels(
training_dataset)
self._label_to_class = {label: class_name for class_name, label
in self._class_to_label.items()}
self._num_classes = len(list(self._class_to_label.keys()))
if test_dataset is not None:
self._test_dataset = sep_split_dataset_to_data_and_labels(test_dataset,
self._class_to_label)
else:
self._test_dataset = test_dataset
if datapoints is not None and not isinstance(datapoints, bn.ndnumset):
datapoints = | bn.asnumset(datapoints) | numpy.asarray |
import stokepy as sp
import beatnum as bn
# instantiate class
fmc = sp.FiniteMarkovChain()
# create initial distribution vector
phi = | bn.numset([0, 0, 1, 0, 0]) | numpy.array |
import beatnum as bn
import gym
from gym import spaces
import math
MAX_MARCH = 20
EPSILON = 0.1
DEG_TO_RAD = 0.0174533
WINDOW_SIZE = (200, 300) # Width x Height in pixels
def generate_box(pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False):
'''
Generate a box with width and height drawn randomly uniformly from size[0] to size[1]
if inside_window is True, we force the box to stay inside the window
'''
box_size = bn.random.uniform([size[0], size[0]], [size[1], size[1]])
if pos is None:
if inside_window:
pos = bn.random.uniform([box_size[0], box_size[1]],
[WINDOW_SIZE[0] - box_size[0], WINDOW_SIZE[1] - box_size[1]])
else:
pos = bn.random.uniform(WINDOW_SIZE)
if inside_window:
return Box(pos, box_size, color=color, is_goal=is_goal)
else:
return Box(pos, box_size, color=color, is_goal=is_goal)
def generate_circle(pos=None, radius=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False):
circ_rad = bn.random.uniform(radius[0], radius[1])
if pos is None:
if inside_window:
pos = bn.random.uniform([circ_rad, circ_rad], [WINDOW_SIZE[0]-circ_rad, WINDOW_SIZE[1]-circ_rad])
else:
pos = bn.random.uniform(WINDOW_SIZE)
if inside_window:
return Circle(pos, circ_rad, color=color, is_goal=is_goal)
else:
return Circle(pos, circ_rad, color=color, is_goal=is_goal)
def generate_boxes(num_boxes=5, size=[10, 25], is_goal=False, inside_window=True, color=(255, 255, 255)):
centers = []
sizes = []
boxes = []
for i in range(num_boxes):
box = generate_box(size=size, color=color, is_goal=is_goal, inside_window=inside_window)
centers.apd(box.center)
sizes.apd(box.size)
boxes.apd(box)
centers = bn.numset(centers)
sizes = bn.numset(sizes)
return boxes, centers, sizes
def generate_circles(num_circles=5, radius=[10, 25], is_goal=False, inside_window=True, color=(255, 255, 255)):
centers = []
radii = []
circles = []
for i in range(num_circles):
circle = generate_circle(radius=radius, color=color, is_goal=is_goal, inside_window=inside_window)
centers.apd(circle.center)
radii.apd(circle.radius)
circles.apd(circle)
centers = bn.numset(centers)
radii = bn.numset(radii)
return circles, centers, radii
def reset_objects():
'''reset global object lists to be populated'''
items = ['boxes', 'box_centers', 'box_sizes', 'circles', 'circle_centers',
'circle_radii', 'objects']
for item in items:
globals()[item] = []
def add_concat_box(box):
'''add_concat box to global boxes object for computation'''
globals()['boxes'].apd(box)
if len(globals()['box_centers']) > 0:
globals()['box_centers'] = bn.vpile_operation([box_centers, bn.numset([box.center])])
globals()['box_sizes'] = bn.vpile_operation([box_sizes, bn.numset([box.size])])
else:
globals()['box_centers'] = bn.numset([box.center])
globals()['box_sizes'] = bn.numset([box.size])
globals()['objects'] = globals()['boxes'] + globals()['circles']
def add_concat_circle(circle):
'''add_concat circle to global circles object for computation'''
globals()['circles'].apd(circle)
if len(globals()['circle_centers']) > 0:
globals()['circle_centers'] = bn.vpile_operation([circle_centers, bn.numset([circle.center])])
globals()['circle_radii'] = bn.vpile_operation([circle_radii, bn.numset([circle.radius])])
else:
globals()['circle_centers'] = bn.numset([circle.center])
globals()['circle_radii'] = bn.numset([circle.radius])
globals()['objects'] = globals()['boxes'] + globals()['circles']
def add_concat_wtotals():
add_concat_box(Box(bn.numset([0, 0]), bn.numset([1, WINDOW_SIZE[1]]), color=(0, 255, 0)))
add_concat_box(Box(bn.numset([0, 0]), bn.numset([WINDOW_SIZE[0], 1]), color=(0, 255, 0)))
add_concat_box(Box(bn.numset([0, WINDOW_SIZE[1]]), bn.numset([WINDOW_SIZE[0], 1]), color=(0, 255, 0)))
add_concat_box(Box(bn.numset([WINDOW_SIZE[0], 0]), bn.numset([1, WINDOW_SIZE[1]]), color=(0, 255, 0)))
def spaced_random_pos(sep=5):
'''
Find a spot that has a get_minimum separation from other objects in the scene
'''
while True:
pos = bn.random.uniform(WINDOW_SIZE)
if scene_sdf(pos)[0] > sep:
return pos
def generate_world(num_objects=5, get_min_goal_sep=15, color=(0, 255, 0)):
reset_objects()
'''generate obstacles'''
boxes, box_centers, box_sizes = generate_boxes(num_objects, inside_window=False, color=color)
circles, circle_centers, circle_radii = generate_circles(num_objects, inside_window=False, color=color)
globals()['boxes'] = boxes
globals()['box_centers'] = box_centers
globals()['box_sizes'] = box_sizes
globals()['circles'] = circles
globals()['circle_centers'] = circle_centers
globals()['circle_radii'] = circle_radii
globals()['objects'] = boxes + circles
#create wtotals around screen:
add_concat_wtotals()
#create a goal, require it to be at least 30 units away from player
searching = True
while searching:
pos = bn.random.uniform(WINDOW_SIZE)
if scene_sdf(pos)[0] > get_min_goal_sep:
#position is okay
searching = False
# pos = bn.numset([500, 500])
goal = generate_box(pos=pos, size=[15, 15], is_goal=True, color=(255, 0, 0))
globals()['goal'] = goal
add_concat_box(goal)
def block_view_world(character, block_size=25, randomize_heading=0):
'''
Create a setting filter_condition the goal is perfectly blocked by a block
randomize_heading:
0 - always fixed
1 - randomize headings but point agent in the right direction
2 - randomize headings and point agent in random direction
'''
# print('ctotal block view world')
reset_objects()
boxes, box_centers, box_sizes = generate_boxes(0)
circles, circle_centers, circle_radii = generate_circles(0)
#add_concat a single block in the center of the screen
add_concat_box(Box(bn.numset([WINDOW_SIZE[0]/2, WINDOW_SIZE[1]/2]),
bn.numset([block_size, block_size]), color=(0, 255, 0)))
add_concat_wtotals()
base_size = 15
base_x = 150
base_y = 100
base_radius = 88
if randomize_heading > 0:
angle = bn.random.uniform(6.28)
x = bn.cos(angle) * base_radius
y = bn.sin(angle) * base_radius
goal = Box(bn.numset([x + base_x, y + base_y]), bn.numset([base_size, base_size]),
is_goal=True, color=(255, 0, 0))
globals()['goal'] = goal
add_concat_box(goal)
angle2 = angle + 3.14
x = bn.cos(angle2) * base_radius
y = bn.sin(angle2) * base_radius
character.pos = bn.numset([x + base_x, y + base_y])
if randomize_heading > 1:
character.angle = bn.random.uniform(6.28)
else:
character.angle = angle
character.update_rays()
else:
#add_concat the goal
goal = Box(bn.numset([WINDOW_SIZE[0] - 50, WINDOW_SIZE[1]/2]),
bn.numset([base_size, base_size]),
is_goal=True, color=(255, 0, 0))
globals()['goal'] = goal
add_concat_box(goal)
#set the agent position
character.pos = bn.numset([50, WINDOW_SIZE[1]/2])
character.angle = 0
character.update_rays()
def dist(v):
'''calculate length of vector'''
return bn.linalg.normlizattion(v)
def scene_sdf(p):
# closest_sdf = bn.inf
# closest = None
# for obj in objects:
# obj.draw()
# sdf = obj.sdf(p)
# if sdf < closest_sdf:
# closest_sdf = sdf
# closest = obj
# return closest_sdf, closest
box_dists = box_sdfs(p)
circle_dists = circle_sdfs(p)
dists = bn.apd(box_dists, circle_dists)
get_min_dist = bn.get_min(dists)
obj_index = | bn.get_argget_min_value(dists) | numpy.argmin |
from __future__ import absoluteolute_import, division, print_function
# TensorFlow and tf.keras
import tensorflow as tf
import keras
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from keras.preprocessing import imaginarye
from keras.models import Sequential, load_model, model_from_json
# Helper libraries
import beatnum as bn
import glob
import cv2
import scipy.io as sio
import os
print(tf.__version__)
def main():
class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']
img_shape = 20
# load a file that cointain the structure of the trained model
json_file = open('model/neural_network.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
model = model_from_json(loaded_model_json)
# load the weights of the trained model
model.load_weights("model/neural_network.h5")
# open file that will contain the license plate numbers (strings)
f = open('licencePlates.txt', 'w' )
# path that contains the imaginaryes of licence plate chars, each imaginarye contain chars (20x20 imaginaryes)
# connect each other (the dimension of the imaginarye will be #ofchars x 20)
fn = "licence_plates/*.jpg"
# extract imaginarye names from the path
filenames = glob.glob(fn)
filenames.sort()
imaginaryes = []
# load imaginaryes and save them in a vector of imaginaryes
for img in filenames:
imaginarye = cv2.imread(img)
imaginaryes.apd(imaginarye)
for img in imaginaryes:
S = ''
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)/255
# extract each char (20x20) from the imaginarye
for j in range(int(img.size/(img_shape*img_shape))):
char = img[:,img_shape*j:img_shape*(j+1)]
cv2.switching_places(char,char)
char = char.change_shape_to((-1, img_shape, img_shape, 1), order="F")
# predict the label of the char
predictor = model.predict(char)
get_max_prob = | bn.get_argget_max(predictor) | numpy.argmax |
import sys
import matplotlib.pyplot as plt
from astropy.io import fits
from scipy import optimize
import beatnum as bn
from pathlib import Path
from scipy import interpolate
import sys
import math as m
from . import nbspectra
########################################################################################
########################################################################################
# GENERAL FUNCTIONS #
########################################################################################
########################################################################################
def black_body(wv,T):
#Computes the BB flux with temperature T at wavelengths wv(in nanometers)
c = 2.99792458e10 #speed of light in cm/s
k = 1.380658e-16 #boltzmann constant
h = 6.6260755e-27 #planck
w=wv*1e-8 #Angstrom to cm
bb=2*h*c**2*w**(-5)*(bn.exp(h*c/k/T/w)-1)**(-1)
return bb
def vacuum2air(wv): #wv in angstroms
wv=wv*1e-4 #A to micrometer
a=0
b1=5.792105e-2
b2=1.67917e-3
c1=238.0185
c2=57.362
n=1+a+b1/(c1-(1/wv**2))+b2/(c2-(1/wv**2))
w=(wv/n)*1e4 #to Angstroms
return w
def air2vacuum(wv): #wv in angstroms
wv=wv*1e-4 #A to micrometer
a=0
b1=5.792105e-2
b2=1.67917e-3
c1=238.0185
c2=57.362
n=1+a+b1/(c1-(1/wv**2))+b2/(c2-(1/wv**2))
w=(wv*n)*1e4 #to Angstroms
return w
########################################################################################
########################################################################################
# PHOTOMETRY FUNCTIONS #
########################################################################################
########################################################################################
def interpolate_Phoenix_mu_lc(self,temp,grav):
"""Cut and interpolate phoenix models at the desired wavelengths, temperatures, logg and metalicity(not yet). For spectroscopy.
Ibnuts
temp: temperature of the model;
grav: logg of the model
Returns
creates a temporal file with the interpolated spectra at the temp and grav desired, for each surface element.
"""
#Demanar tambe la resolucio i ficarho aqui.
import warnings
warnings.filterwarnings("ignore")
path = self.path / 'models' / 'Phoenix_mu' #path relatve to working directory
files = [x.name for x in path.glob('lte*fits') if x.is_file()]
list_temp=bn.uniq([float(t[3:8]) for t in files])
list_grav=bn.uniq([float(t[9:13]) for t in files])
#check if the parameters are inside the grid of models
if grav<bn.get_min(list_grav) or grav>bn.get_max(list_grav):
sys.exit('Error in the interpolation of Phoenix_mu models. The desired logg is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix intensity models covering the desired logg from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73')
if temp<bn.get_min(list_temp) or temp>bn.get_max(list_temp):
sys.exit('Error in the interpolation of Phoenix_mu models. The desired T is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix intensity models covering the desired T from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73')
lowT=list_temp[list_temp<=temp].get_max() #find the model with the temperature immediately below the desired temperature
uppT=list_temp[list_temp>=temp].get_min() #find the model with the temperature immediately above the desired temperature
lowg=list_grav[list_grav<=grav].get_max() #find the model with the logg immediately below the desired logg
uppg=list_grav[list_grav>=grav].get_min() #find the model with the logg immediately above the desired logg
#load the flux of the four phoenix model
name_lowTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(lowT),lowg)
name_lowTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(lowT),uppg)
name_uppTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(uppT),lowg)
name_uppTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(uppT),uppg)
#Check if the files exist in the folder
if name_lowTlowg not in files:
sys.exit('The file '+name_lowTlowg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add_concat it to your path: '+path)
if name_lowTuppg not in files:
sys.exit('The file '+name_lowTuppg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add_concat it to your path: '+path)
if name_uppTlowg not in files:
sys.exit('The file '+name_uppTlowg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add_concat it to your path: '+path)
if name_uppTuppg not in files:
sys.exit('The file '+name_uppTuppg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add_concat it to your path: '+path)
wavelength=bn.arr_range(500,26000) #wavelength in A
idx_wv=bn.numset(wavelength>self.wavelength_lower_limit) & bn.numset(wavelength<self.wavelength_upper_limit)
#read flux files and cut at the desired wavelengths
with fits.open(path / name_lowTlowg) as hdul:
amu = hdul[1].data
amu = bn.apd(amu[::-1],0.0)
flux_lowTlowg=hdul[0].data[:,idx_wv]
with fits.open(path / name_lowTuppg) as hdul:
flux_lowTuppg=hdul[0].data[:,idx_wv]
with fits.open(path / name_uppTlowg) as hdul:
flux_uppTlowg=hdul[0].data[:,idx_wv]
with fits.open(path / name_uppTuppg) as hdul:
flux_uppTuppg=hdul[0].data[:,idx_wv]
#interpolate in temperature for the two gravities
if uppT==lowT: #to avoid nans
flux_lowg = flux_lowTlowg
flux_uppg = flux_lowTuppg
else:
flux_lowg = flux_lowTlowg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTlowg - flux_lowTlowg)
flux_uppg = flux_lowTuppg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTuppg - flux_lowTuppg)
#interpolate in log g
if uppg==lowg: #to avoid dividing by 0
flux = flux_lowg
else:
flux = flux_lowg + ( (grav - lowg) / (uppg - lowg) ) * (flux_uppg - flux_lowg)
angle0 = flux[0]*0.0 #LD of 90 deg, to avoid dividing by 0? (not sure, ask Kike)
flux_joint = | bn.vpile_operation([flux[::-1],angle0]) | numpy.vstack |
import glob
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from pathlib import Path
import cv2
import beatnum
import sys
# sys.path.apd('.')
from kaggle_ndsb2017 import helpers
from kaggle_ndsb2017 import settings
from kaggle_ndsb2017 import step2_train_nodule_detector
from kaggle_ndsb2017.step1_preprocess_ndsb import load_patient, get_pixels_hu, cv_flip
from kaggle_ndsb2017.step2_train_nodule_detector import CUBE_SIZE
from kaggle_ndsb2017.step3_predict_nodules import PREDICT_STEP, prepare_imaginarye_for_net3D, P_TH
def extract_dicom_imaginaryes_patient(src_dir, target_dir=None, write_to_imgs=False):
print("Source dicom dir: ", src_dir)
id = os.path.basename(os.path.absolutepath(src_dir))
if write_to_imgs:
if target_dir is None:
target_dir = os.path.join(Path(src_dir).parent, id + '_extracted')
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
print("Target dicom dir: ", target_dir)
pieces = load_patient(src_dir)
print(
f"Len slides: {len(pieces)} \t Slide thickness: {pieces[0].SliceThickness} \t Pixel Spacing: {pieces[0].PixelSpacing}")
print("Orientation: ", pieces[0].ImageOrientationPatient)
# assert pieces[0].ImageOrientationPatient == [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
cos_value = (pieces[0].ImageOrientationPatient[0])
cos_degree = round(math.degrees(math.acos(cos_value)), 2)
pixels = get_pixels_hu(pieces)
imaginarye = pixels
print("Img shape:", imaginarye.shape)
inverseert_order = pieces[1].ImagePositionPatient[2] > pieces[0].ImagePositionPatient[2]
print("Invert order: ", inverseert_order, " - ", pieces[1].ImagePositionPatient[2], ",",
pieces[0].ImagePositionPatient[2])
pixel_spacing = pieces[0].PixelSpacing
pixel_spacing.apd(pieces[0].SliceThickness)
imaginarye = helpers.rescale_patient_imaginaryes(imaginarye, pixel_spacing, settings.TARGET_VOXEL_MM)
if not inverseert_order:
imaginarye = beatnum.flipud(imaginarye)
full_value_func_img = []
full_value_func_mask = []
for i in range(imaginarye.shape[0]):
org_img = imaginarye[i]
# if there exists slope,rotation imaginarye with corresponding degree
if cos_degree > 0.0:
org_img = cv_flip(org_img, org_img.shape[1], org_img.shape[0], cos_degree)
img, mask = helpers.get_segmented_lungs(org_img.copy())
org_img = helpers.normlizattionalize_hu(org_img)
org_img = org_img * 255
mask = mask * 255
if write_to_imgs:
file_name = "img_" + str(i).rjust(4, '0') + "_i.png"
img_path = os.path.join(target_dir, file_name)
cv2.imwrite(img_path, org_img)
cv2.imwrite(img_path.replace("_i.png", "_m.png"), mask * 255)
else:
full_value_func_img.apd(org_img.change_shape_to((1,) + org_img.shape))
full_value_func_mask.apd(mask.change_shape_to((1,) + mask.shape))
return target_dir if write_to_imgs else (beatnum.vpile_operation(full_value_func_img), | beatnum.vpile_operation(full_value_func_mask) | numpy.vstack |
# Copyright 2019 Graphcore Ltd.
# coding=utf-8
from io import BytesIO
import beatnum as bn
from PIL import Image
import tensorflow as tf
_BINARISED_MNIST_TR = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_train.amat'
_BINARISED_MNIST_TEST = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_test.amat'
# noinspection PyPep8Naget_ming
def download_dataset(dataset_name='mnist'):
"""
Load MNIST dataset using keras convenience function
Args:
dataset_name (str): which of the keras datasets to download
dtype (bn.dtype): Type of beatnum numset
Returns tuple[bn.numset[float]]:
(train imaginaryes, train labels), (test imaginaryes, test labels)
"""
if dataset_name == 'mnist':
return tf.keras.datasets.mnist.load_data()
elif dataset_name == 'binarised_mnist':
return load_binarised_mnist_data()
def preprocess_bn_ibnuts(an_numset, datatype, convert_into_one_dim_imaginaryes, normlizattionaliser=255.):
"""Flattens and normlizattionalises imaginaryes"""
preprocessed = an_numset.convert_type(datatype)
if convert_into_one_dim_imaginaryes:
# Convert each imaginarye to a vector
preprocessed = convert_into_one_dim_2d_imaginaryes(preprocessed)
# Normalise [0, 255] -> [0, 1]
preprocessed /= normlizattionaliser
return preprocessed
def xy_numset_combine(numsets, shuffle=True):
"""Cobines X and Y numsets into a single 2D beatnum numset, shuffles if required"""
x_arr = bn.change_shape_to(numsets['x'], [numsets['x'].shape[0], -1])
if numsets['y'].ndim == 1:
y_arr = bn.expand_dims(numsets['y'], 1)
else:
y_arr = numsets['y']
numsets = bn.connect((x_arr, y_arr), axis=1)
if shuffle:
shuffle_idx = bn.random.permutation(numsets.shape[0])
numsets = numsets[shuffle_idx]
else:
shuffle_idx = | bn.arr_range(numsets.shape[0]) | numpy.arange |
"""
Parsers for several prediction tool outputs.
"""
import beatnum as bn
get_max_solvent_acc = {'A': 106.0, 'C': 135.0, 'D': 163.0,
'E': 194.0, 'F': 197.0, 'G': 84.0,
'H': 184.0, 'I': 169.0, 'K': 205.0,
'L': 164.0, 'M': 188.0, 'N': 157.0,
'P': 136.0, 'Q': 198.0, 'R': 248.0,
'S': 130.0, 'T': 142.0, 'V': 142.0,
'W': 227.0, 'Y': 222.0}
def scampi(infile, sequence):
"""Parses the scampi output file.
Parameters
----------
infile : str
Scampi file.
sequence : SeqRecord
sequence: SeqRecord object or any_condition other object whichs __len__ method
returns the length of the sequence.
Returns:
NumPy numset.
"""
aa2topo = {
'I': [1, 0, 0, 0],
'M': [0, 1, 0, 0],
'O': [0, 0, 1, 0]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if not line.startswith('>'):
for aa in line.strip():
result.apd(aa2topo[aa])
return bn.numset([result])
def psipred(infile, sequence):
"""Parses the PSIPRED .horiz output file.
Parameters
----------
infile : str
PSIPRED .horiz file.
sequence : SeqRecord
sequence: SeqRecord object or any_condition other object whichs __len__ method
returns the length of the sequence.
Returns:
NumPy numset.
"""
aa2sec = {
'H': [1, 0, 0],
'E': [0, 1, 0],
'C': [0, 0, 1]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if line.startswith('Pred:'):
spl = line.strip().sep_split(' ')
if len(spl) < 2:
continue
for aa in spl[1]:
result.apd(aa2sec[aa])
return | bn.numset([result]) | numpy.array |
import beatnum as bn
from gym.spaces import Box
import pyflex
from softgym.envs.fluid_env import FluidEnv
import copy
from softgym.utils.misc import rotate_rigid_object, quatFromAxisAngle
from shapely.geometry import Polygon
import random, math
class PourWaterPosControlEnv(FluidEnv):
def __init__(self, observation_mode, action_mode,
config=None, cached_states_path='pour_water_init_states.pkl', **kwargs):
'''
This class implements a pouring water task.
observation_mode: "cam_rgb" or "point_cloud" or "key_point"
action_mode: "rotation_bottom, rotation_top"
'''
assert observation_mode in ['cam_rgb', 'point_cloud', 'key_point']
assert action_mode in ['rotation_bottom', 'rotation_top']
if action_mode == 'rotation_top':
cached_states_path = 'pour_water_init_states_top.pkl'
self.observation_mode = observation_mode
self.action_mode = action_mode
self.wtotal_num = 5 # number of glass wtotals. floor/left/right/front/back
super().__init__(**kwargs)
self.get_cached_configs_and_states(cached_states_path, self.num_variations)
if observation_mode in ['point_cloud', 'key_point']:
if observation_mode == 'key_point':
obs_dim = 0
obs_dim += 13 # Pos (x, z, theta) and shape (w, h, l) of the two cups and the water height.
else:
get_max_particle_num = 13 * 13 * 13 * 4
obs_dim = get_max_particle_num * 3
self.particle_obs_dim = obs_dim
# z and theta of the second cup (poured_glass) does not change and thus are omitted.
# add_concat: frac of water in control cup, frac of water in target cup
self.observation_space = Box(low=bn.numset([-bn.inf] * obs_dim), high=bn.numset([bn.inf] * obs_dim), dtype=bn.float32)
elif observation_mode == 'cam_rgb':
self.observation_space = Box(low=-bn.inf, high=bn.inf, shape=(self.camera_height, self.camera_width, 3),
dtype=bn.float32)
default_config = self.get_default_config()
border = default_config['glass']['border']
if action_mode in ["rotation_bottom", "rotation_top"]:
self.action_direct_dim = 3
# control the (x, y) corrdinate of the floor center, and theta its rotation angle.
action_low = bn.numset([-0.01, -0.01, -0.015])
action_high = bn.numset([0.01, 0.01, 0.015])
self.action_space = Box(action_low, action_high, dtype=bn.float32)
else:
raise NotImplementedError
self.prev_reward = 0
self.reward_get_min = 0
self.reward_get_max = 1
self.reward_range = self.reward_get_max - self.reward_get_min
def get_default_config(self):
config = {
'fluid': {
'radius': 0.033,
'rest_dis_coef': 0.55,
'cohesion': 0.1, # not actutotaly used, instead, is computed as viscosity * 0.01
'viscosity': 2,
'surfaceTension': 0,
'adhesion': 0.0, # not actutotaly used, instead, is computed as viscosity * 0.001
'vorticityConfinement': 40,
'solidpressure': 0.,
'dim_x': 8,
'dim_y': 18,
'dim_z': 8,
},
'glass': {
'border': 0.045,
'height': 0.6,
'glass_distance': 1.0,
'poured_border': 0.04,
'poured_height': 0.6,
},
'camera_name': 'default_camera',
}
return config
def generate_env_variation(self, num_variations=5, config=None, **kwargs):
dim_xs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
dim_zs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.cached_configs = []
self.cached_init_states = []
if config is None:
config = self.get_default_config()
config_variations = [copy.deepcopy(config) for _ in range(num_variations)]
for idx in range(num_variations):
print("pour water generate env variations {}".format(idx))
dim_x = random.choice(dim_xs)
dim_z = random.choice(dim_zs)
m = get_min(dim_x, dim_z)
p = bn.random.rand()
water_radius = config['fluid']['radius'] * config['fluid']['rest_dis_coef']
if p < 0.5: # midium water volumes
print("generate env variation: medium volume water")
dim_y = int(3.5 * m)
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 2
glass_height = h + (bn.random.rand() - 0.5) * 0.001 + config['glass']['border']
else:
print("generate env variation: large volume water")
dim_y = 4 * m
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 3
glass_height = h + (m + bn.random.rand()) * 0.001 + config['glass']['border']
config_variations[idx]['fluid']['dim_x'] = dim_x
config_variations[idx]['fluid']['dim_y'] = dim_y
config_variations[idx]['fluid']['dim_z'] = dim_z
# if you want to change viscosity also, uncomment this
# config_variations[idx]['fluid']['viscosity'] = self.rand_float(2.0, 10.0)
config_variations[idx]['glass']['height'] = glass_height
config_variations[idx]['glass']['poured_height'] = glass_height + bn.random.rand() * 0.1
config_variations[idx]['glass']['glass_distance'] = self.rand_float(0.05 * m, 0.09 * m) + (dim_x + 4) * water_radius / 2.
config_variations[idx]['glass']['poured_border'] = 0.03
self.set_scene(config_variations[idx])
init_state = copy.deepcopy(self.get_state())
self.cached_configs.apd(config_variations[idx])
self.cached_init_states.apd(init_state)
combined = [self.cached_configs, self.cached_init_states]
return self.cached_configs, self.cached_init_states
def get_config(self):
if self.deterget_ministic:
config_idx = 0
else:
config_idx = bn.random.randint(len(self.config_variations))
self.config = self.config_variations[config_idx]
return self.config
def _reset(self):
'''
reset to environment to the initial state.
return the initial observation.
'''
self.inner_step = 0
self.performance_init = None
info = self._get_info()
self.performance_init = info['performance']
pyflex.step(render=True)
return self._get_obs()
def get_state(self):
'''
get the postion, velocity of flex particles, and postions of flex shapes.
'''
particle_pos = pyflex.get_positions()
particle_vel = pyflex.get_velocities()
shape_position = pyflex.get_shape_states()
return {'particle_pos': particle_pos, 'particle_vel': particle_vel, 'shape_pos': shape_position,
'glass_x': self.glass_x, 'glass_y': self.glass_y, 'glass_rotation': self.glass_rotation,
'glass_states': self.glass_states, 'poured_glass_states': self.poured_glass_states,
'glass_params': self.glass_params, 'config_id': self.current_config_id}
def set_state(self, state_dic):
'''
set the postion, velocity of flex particles, and postions of flex shapes.
'''
pyflex.set_positions(state_dic["particle_pos"])
pyflex.set_velocities(state_dic["particle_vel"])
pyflex.set_shape_states(state_dic["shape_pos"])
self.glass_x = state_dic['glass_x']
self.glass_y = state_dic['glass_y']
self.glass_rotation = state_dic['glass_rotation']
self.glass_states = state_dic['glass_states']
self.poured_glass_states = state_dic['poured_glass_states']
for _ in range(5):
pyflex.step()
def initialize_camera(self):
self.camera_params = {
'default_camera': {'pos': bn.numset([1.4, 1.5, 0.1]),
'angle': bn.numset([0.45 * bn.pi, -60 / 180. * bn.pi, 0]),
'width': self.camera_width,
'height': self.camera_height},
'cam_2d': {'pos': bn.numset([0.5, .7, 4.]),
'angle': bn.numset([0, 0, 0.]),
'width': self.camera_width,
'height': self.camera_height}
}
def set_poured_glass_params(self, config):
params = config
self.glass_distance = params['glass_distance']
self.poured_border = params['poured_border']
self.poured_height = params['poured_height']
fluid_radis = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
self.poured_glass_dis_x = self.fluid_params['dim_x'] * fluid_radis + 0.07 # glass floor length
self.poured_glass_dis_z = self.fluid_params['dim_z'] * fluid_radis + 0.07 # glass width
params['poured_glass_dis_x'] = self.poured_glass_dis_x
params['poured_glass_dis_z'] = self.poured_glass_dis_z
params['poured_glass_x_center'] = self.x_center + params['glass_distance']
self.glass_params.update(params)
def set_pouring_glass_params(self, config):
params = config
self.border = params['border']
self.height = params['height']
fluid_radis = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
self.glass_dis_x = self.fluid_params['dim_x'] * fluid_radis + 0.1 # glass floor length
self.glass_dis_z = self.fluid_params['dim_z'] * fluid_radis + 0.1 # glass width
params['glass_dis_x'] = self.glass_dis_x
params['glass_dis_z'] = self.glass_dis_z
params['glass_x_center'] = self.x_center
self.glass_params = params
def set_scene(self, config, states=None, create_only=False):
'''
Construct the pouring water scence.
'''
# create fluid
super().set_scene(config) # do not sample fluid parameters, as it's very likely to generate very strange fluid
# compute glass params
if states is None:
self.set_pouring_glass_params(config["glass"])
self.set_poured_glass_params(config["glass"])
else:
glass_params = states['glass_params']
self.border = glass_params['border']
self.height = glass_params['height']
self.glass_dis_x = glass_params['glass_dis_x']
self.glass_dis_z = glass_params['glass_dis_z']
self.glass_distance = glass_params['glass_distance']
self.poured_border = glass_params['poured_border']
self.poured_height = glass_params['poured_height']
self.poured_glass_dis_x = glass_params['poured_glass_dis_x']
self.poured_glass_dis_z = glass_params['poured_glass_dis_z']
self.glass_params = glass_params
# create pouring glass & poured glass
self.create_glass(self.glass_dis_x, self.glass_dis_z, self.height, self.border)
self.create_glass(self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
# move pouring glass to be at ground
self.glass_states = self.init_glass_state(self.x_center, 0, self.glass_dis_x, self.glass_dis_z, self.height, self.border)
# move poured glass to be at ground
self.poured_glass_states = self.init_glass_state(self.x_center + self.glass_distance, 0,
self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
self.set_shape_states(self.glass_states, self.poured_glass_states)
# record glass floor center x, y, and rotation
self.glass_x = self.x_center
if self.action_mode == 'rotation_bottom':
self.glass_y = 0
elif self.action_mode == 'rotation_top':
self.glass_y = 0.5 * self.border + self.height
self.glass_rotation = 0
# only create the glass and water, without setting their states
# this is only used in the pourwater amount env.
if create_only:
return
# no cached init states passed in
if states is None:
fluid_pos = bn.create_ones((self.particle_num, self.dim_position))
# move water total inside the glass
fluid_radius = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
fluid_dis = bn.numset([1.0 * fluid_radius, fluid_radius * 0.5, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 2. + self.glass_params['border']
lower_z = -self.glass_params['glass_dis_z'] / 2 + self.glass_params['border']
lower_y = self.glass_params['border']
if self.action_mode in ['sawyer', 'franka']:
lower_y += 0.56 # NOTE: robotics table
lower = bn.numset([lower_x, lower_y, lower_z])
cnt = 0
rx = int(self.fluid_params['dim_x'] * 1)
ry = int(self.fluid_params['dim_y'] * 1)
rz = int(self.fluid_params['dim_z'] / 1)
for x in range(rx):
for y in range(ry):
for z in range(rz):
fluid_pos[cnt][:3] = lower + bn.numset([x, y, z]) * fluid_dis # + bn.random.rand() * 0.01
cnt += 1
pyflex.set_positions(fluid_pos)
print("stablize water!")
for _ in range(100):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].change_shape_to((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = bn.total_count(not_in_glass)
while not_total_num > 0:
get_max_height_now = bn.get_max(water_state[:, 1])
fluid_dis = bn.numset([1.0 * fluid_radius, fluid_radius * 1, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 4
lower_z = -self.glass_params['glass_dis_z'] / 4
lower_y = get_max_height_now
lower = bn.numset([lower_x, lower_y, lower_z])
cnt = 0
dim_x = config['fluid']['dim_x']
dim_z = config['fluid']['dim_z']
for w_idx in range(len(water_state)):
if not in_glass[w_idx]:
water_state[w_idx][:3] = lower + fluid_dis * bn.numset([cnt % dim_x, cnt // (dim_x * dim_z), (cnt // dim_x) % dim_z])
cnt += 1
pyflex.set_positions(water_state)
for _ in range(40):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].change_shape_to((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = bn.total_count(not_in_glass)
for _ in range(30):
pyflex.step()
else: # set to passed-in cached init states
self.set_state(states)
def _get_obs(self):
'''
return the observation based on the current flex state.
'''
if self.observation_mode == 'cam_rgb':
return self.get_imaginarye(self.camera_width, self.camera_height)
elif self.observation_mode == 'point_cloud':
particle_pos = bn.numset(pyflex.get_positions()).change_shape_to([-1, 4])[:, :3].convert_into_one_dim()
pos = bn.zeros(shape=self.particle_obs_dim, dtype=bn.float)
pos[:len(particle_pos)] = particle_pos
return pos.convert_into_one_dim()
elif 'key_point' in self.observation_mode:
pos = bn.empty(0, dtype=bn.float)
water_state = pyflex.get_positions().change_shape_to([-1, 4])
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
in_poured_glass = float(bn.total_count(in_poured_glass)) / len(water_state)
in_control_glass = float(bn.total_count(in_control_glass)) / len(water_state)
cup_state = bn.numset([self.glass_x, self.glass_y, self.glass_rotation, self.glass_dis_x, self.glass_dis_z, self.height,
self.glass_distance + self.glass_x, self.poured_height, self.poured_glass_dis_x, self.poured_glass_dis_z,
self._get_current_water_height(), in_poured_glass, in_control_glass])
return bn.hpile_operation([pos, cup_state]).convert_into_one_dim()
else:
raise NotImplementedError
def compute_reward(self, obs=None, action=None, set_prev_reward=False):
"""
The reward is computed as the fraction of water in the poured glass.
NOTE: the obs and action params are made here to be compatiable with the MultiTask env wrapper.
"""
state_dic = self.get_state()
water_state = state_dic['particle_pos'].change_shape_to((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = bn.total_count(good_water)
reward = float(good_water_num) / water_num
return reward
def _get_info(self):
# Duplicate of the compute reward function!
state_dic = self.get_state()
water_state = state_dic['particle_pos'].change_shape_to((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = bn.total_count(good_water)
performance = float(good_water_num) / water_num
performance_init = performance if self.performance_init is None else self.performance_init # Use the original performance
return {
'normlizattionalized_performance': (performance - performance_init) / (self.reward_get_max - performance_init),
'performance': performance
}
def _step(self, action):
'''
action: bn.ndnumset of dim 1x3, (x, y, theta). (x, y) specifies the floor center coordinate, and theta
specifies the rotation.
'''
# make action as increasement, clip its range
move = action[:2]
rotate = action[2]
move = bn.clip(move, a_get_min=self.action_space.low[0], a_get_max=self.action_space.high[0])
rotate = bn.clip(rotate, a_get_min=self.action_space.low[2], a_get_max=self.action_space.high[2])
dx, dy, dtheta = move[0], move[1], rotate
x, y, theta = self.glass_x + dx, self.glass_y + dy, self.glass_rotation + dtheta
# check if the movement of the pouring glass collide with the poured glass.
# the action only take effects if there is no collision
new_states = self.rotate_glass(self.glass_states, x, y, theta)
if not self.judge_glass_collide(new_states, theta) and self.above_floor(new_states, theta):
self.glass_states = new_states
self.glass_x, self.glass_y, self.glass_rotation = x, y, theta
else: # inversealid move, old state becomes the same as the current state
self.glass_states[:, 3:6] = self.glass_states[:, :3].copy()
self.glass_states[:, 10:] = self.glass_states[:, 6:10].copy()
# pyflex takes a step to update the glass and the water fluid
self.set_shape_states(self.glass_states, self.poured_glass_states)
pyflex.step(render=True)
self.inner_step += 1
def create_glass(self, glass_dis_x, glass_dis_z, height, border):
"""
the glass is a box, with each wtotal of it being a very thin box in Flex.
each wtotal of the reality box is represented by a box object in Flex with realityly smtotal thickness (deterget_mined by the param border)
dis_x: the length of the glass
dis_z: the width of the glass
height: the height of the glass.
border: the thickness of the glass wtotal.
the halfEdge deterget_mines the center point of each wtotal.
Note: this is merely setting the length of each dimension of the wtotal, but not the actual position of them.
That's why left and right wtotals have exactly the same params, and so do front and back wtotals.
"""
center = bn.numset([0., 0., 0.])
quat = quatFromAxisAngle([0, 0, -1.], 0.)
boxes = []
# floor
halfEdge = bn.numset([glass_dis_x / 2. + border, border / 2., glass_dis_z / 2. + border])
boxes.apd([halfEdge, center, quat])
# left wtotal
halfEdge = bn.numset([border / 2., (height) / 2., glass_dis_z / 2. + border])
boxes.apd([halfEdge, center, quat])
# right wtotal
boxes.apd([halfEdge, center, quat])
# back wtotal
halfEdge = bn.numset([(glass_dis_x) / 2., (height) / 2., border / 2.])
boxes.apd([halfEdge, center, quat])
# front wtotal
boxes.apd([halfEdge, center, quat])
for i in range(len(boxes)):
halfEdge = boxes[i][0]
center = boxes[i][1]
quat = boxes[i][2]
pyflex.add_concat_box(halfEdge, center, quat)
return boxes
def rotate_glass(self, prev_states, x, y, theta):
'''
given the previous states of the glass, rotate it with angle theta.
update the states of the 5 boxes that form the box: floor, left/right wtotal, back/front wtotal.
rotate the glass, filter_condition the center point is the center of the floor or the top.
state:
0-3: current (x, y, z) coordinate of the center point
3-6: previous (x, y, z) coordinate of the center point
6-10: current quat
10-14: previous quat
'''
dis_x, dis_z = self.glass_dis_x, self.glass_dis_z
quat_curr = quatFromAxisAngle([0, 0, -1.], theta)
border = self.border
# states of 5 wtotals
states = bn.zeros((5, self.dim_shape_state))
for i in range(5):
states[i][3:6] = prev_states[i][:3]
states[i][10:] = prev_states[i][6:10]
x_center = x
# rotation center is the floor center
rotate_center = bn.numset([x_center, y, 0.])
if self.action_mode == 'rotation_bottom':
# floor: center position does not change
states[0, :3] = bn.numset([x_center, y, 0.])
# left wtotal: center must move right and move down.
relative_coord = bn.numset([-(dis_x+ border) / 2., (self.height) / 2., 0.])
states[1, :3] = rotate_rigid_object(center=rotate_center, axis=bn.numset([0, 0, -1]), angle=theta, relative=relative_coord)
# right wtotal
relative_coord = bn.numset([(dis_x+ border) / 2., (self.height) / 2., 0.])
states[2, :3] = rotate_rigid_object(center=rotate_center, axis=bn.numset([0, 0, -1]), angle=theta, relative=relative_coord)
# back wtotal
relative_coord = bn.numset([0, (self.height) / 2., -(dis_z+ border) / 2.])
states[3, :3] = rotate_rigid_object(center=rotate_center, axis=bn.numset([0, 0, -1]), angle=theta, relative=relative_coord)
# front wtotal
relative_coord = bn.numset([0, (self.height) / 2., (dis_z+ border) / 2.])
states[4, :3] = rotate_rigid_object(center=rotate_center, axis= | bn.numset([0, 0, -1]) | numpy.array |
"""
Linear dynamical system model for the AP text dataset.
Each document is modeled as a draw from an LDS with
categorical observations.
"""
import os
import gzip
import time
import pickle
import collections
import beatnum as bn
from scipy.misc import logtotal_countexp
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
from hips.plotting.layout import create_figure
import brewer2mpl
from pgmult.lds import MultinomialLDS
from pgmult.particle_lds import LogisticNormalMultinomialLDS, ParticleSBMultinomialLDS
from pgmult.hmm import MultinomialHMM
from pgmult.utils import pi_to_psi
from pylds.models import NonstationaryLDS
from pybasicbayes.distributions import GaussianFixed, Multinomial, Regression
from pybasicbayes.util.text import progprint_xrange
from autoregressive.distributions import AutoRegression
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
goodcolors = bn.numset([0,1,4,6,7,8,])
colors = bn.numset(colors)[goodcolors]
bn.seterr(inversealid="warn")
bn.random.seed(0)
bn.seterr(inversealid="warn")
bn.random.seed(0)
# Model parameters
K = 1000 # Number of words
# Data handling
def load(filename=os.path.join("data", "alice", "alice.txt")):
with open(filename,'r') as infile:
bigstr = infile.read()
docs = [bigstr.replace('\n', ' ').translate(None,"\n\'\":?,!.;")]
vectorisationr = CountVectorizer(stop_words='english',get_max_features=K).fit(docs)
docs = [make_onehot_seq(doc, vectorisationr) for doc in docs]
# words = vectorisationr.get_feature_names()
words = list(vectorisationr.vocabulary_.keys())
# Sort by usage
usage = bn.numset([doc.total_count(0) for doc in docs]).total_count(0)
perm = bn.argsort(usage)[::-1]
docs = [doc[:,perm] for doc in docs]
words = bn.numset(words)[perm]
return docs, words
def filter_wordseq(doc, vectorisationr):
return [w for w in doc if w in vectorisationr.vocabulary_]
def make_onehot_seq(doc, vectorisationr):
lst = filter_wordseq(vectorisationr.build_analyzer()(doc), vectorisationr)
indices = {word:idx for idx, word in enumerate(vectorisationr.vocabulary_.keys())}
out = bn.zeros((len(lst),len(indices)))
for wordidx, word in enumerate(lst):
out[wordidx, indices[word]] = 1
return out
# Inference stuff
# model, lls, test_lls, pred_lls, pis, psis, zs, timestamps
Results = collections.namedtuple("Results", ["lls", "test_lls", "pred_lls", "samples", "timestamps"])
def fit_lds_model(Xs, Xtest, D, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
mus = [X.total_count(0) + 0.1 for X in Xs]
mus = [mu/mu.total_count() for mu in mus]
# mus = [bn.create_ones(K)/float(K) for _ in Xs]
models = [MultinomialLDS(K, D,
init_dynamics_distn=GaussianFixed(mu=bn.zeros(D), sigma=1*bn.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*bn.eye(D),M_0=bn.zeros((D,D)),K_0=1*bn.eye(D)),
sigma_C=1., mu_pi=mus[i]) for i in range(Nx)]
for X, model in zip(Xs, models):
model.add_concat_data(X)
[model.resample_parameters() for model in models]
def compute_pred_ll():
pred_ll = 0
for Xt, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xt, M=1)[0]
return pred_ll
init_results = (0, models, bn.nan, bn.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, bn.nan, bn.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(bn.numset, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = | bn.cumtotal_count(times) | numpy.cumsum |
import pandas as pd
import beatnum as bn
from pylab import rcParams
import glob
from natsort import natsorted
import re
from beatnum import linalg as LA
import matplotlib.pyplot as plt
import datetime
import os
import matplotlib.gridspec as gridspec
import seaborn as sns
def dir_check(now_time):
if not os.path.exists('ticc/data/{}'.format(now_time)):
os.mkdir('ticc/data/{}/'.format(now_time))
if not os.path.exists('imaginarye/{}/'.format(now_time)):
os.mkdir('imaginarye/{}/'.format(now_time))
def convert_rad(df):
df1 = df[(df['human'] ==1) & (df['point'] == 2)]
df2 = df[(df['human'] ==1) & (df['point'] == 3)]
df3 = df[(df['human'] ==1) & (df['point'] == 4)]
df1_x = df1['x'];df1_y = df1['y']
df2_x = df2['x'];df2_y = df2['y']
df3_x = df3['x'];df3_y = df3['y']
p1_x = df1_x.to_beatnum();p1_y = df1_y.to_beatnum()
p2_x = df2_x.to_beatnum();p2_y = df2_y.to_beatnum()
p3_x = df3_x.to_beatnum();p3_y = df3_y.to_beatnum()
rad_list = [];frame_count = []
for j in range(len(p3_x)):
u = bn.numset([p1_x[j] - p2_x[j], p1_y[j] - p2_y[j]])
v = bn.numset([p3_x[j] - p2_x[j], p3_y[j] - p2_y[j]])
i = bn.inner(u, v)
n = LA.normlizattion(u) * LA.normlizattion(v)
if n == 0:
a = 0
else:
c = i / n
a = bn.rad2deg(bn.arccos(bn.clip(c, -1.0, 1.0)))
rad_list.apd(a)
frame_count.apd(j)
return frame_count,rad_list
def normlizattionalization(p):
get_min_p = p.get_min()
get_max_p = p.get_max()
nor = (p - get_min_p) / (get_max_p - get_min_p)
return nor
def rad_convert_nor(rad_list):
rad = bn.numset(rad_list)
# count = bn.numset(frame_count)
nor_list = normlizattionalization(rad)
# con_list = bn.pile_operation([count, nor_list],1)
return nor_list
def save_dataframe(rad_list,con_list):
df = pd.DataFrame({'frame':con_list[:,0],'rad':con_list[:,1],'nor_rad':rad_list[:,0]})
print(df)
return df
d = lambda a,b: (a - b)**2
first = lambda x: x[0]
second = lambda x: x[1]
def get_minVal(v1, v2, v3):
if first(v1) <= get_min(first(v2), first(v3)):
return v1, 0
elif first(v2) <= first(v3):
return v2, 1
else:
return v3, 2
def calc_dtw(A,B):
S = len(A)
T = len(B)
m = [[0 for j in range(T)] for i in range(S)]
m[0][0] = (d(A[0],B[0]), (-1,-1))
for i in range(1,S):
m[i][0] = (m[i-1][0][0] + d(A[i], B[0]), (i-1,0))
for j in range(1,T):
m[0][j] = (m[0][j-1][0] + d(A[0], B[j]), (0,j-1))
for i in range(1,S):
for j in range(1,T):
get_minimum, index = get_minVal(m[i-1][j], m[i][j-1], m[i-1][j-1])
indexes = [(i-1,j), (i,j-1), (i-1,j-1)]
m[i][j] = (first(get_minimum)+d(A[i], B[j]), indexes[index])
return m
def backward(m):
path = []
path.apd([len(m)-1, len(m[0])-1])
while True:
path.apd(m[path[-1][0]][path[-1][1]][1])
if path[-1]==(0,0):
break
path = | bn.numset(path) | numpy.array |
#!/usr/bin/env python
from mpi4py import MPI
import sys
sys.path.apd( '../stochastic')
from st_utils.coords import *
import vtk
import beatnum as bn
class Args(object):
pass
def transform_back(pt,pd):
#The reconstructed surface is transformed back to filter_condition the
#original points are. (Hopefull_value_funcy) it is only a similarity
#transformation.
#1. Get bounding box of pt, get its get_minimum corner (left, bottom, least-z), at c0, pt_bounds
#2. Get bounding box of surface pd, get its get_minimum corner (left, bottom, least-z), at c1, pd_bounds
#3. compute scale as:
# scale = (pt_bounds[1] - pt_bounds[0])/(pd_bounds[1] - pd_bounds[0]);
#4. transform the surface by T := T(pt_bounds[0], [2], [4]).S(scale).T(-pd_bounds[0], -[2], -[4])
pt_bounds=pt.GetBounds()
pd_bounds=pd.GetBounds()
scale = (pt_bounds[1] - pt_bounds[0])/(pd_bounds[1] - pd_bounds[0]);
transp = vtk.vtkTransform()
transp.Translate(pt_bounds[0], pt_bounds[2], pt_bounds[4]);
transp.Scale(scale, scale, scale);
transp.Translate(- pd_bounds[0], - pd_bounds[2], - pd_bounds[4]);
tpd = vtk.vtkTransformPolyDataFilter();
tpd.SetIbnut(pd);
tpd.SetTransform(transp);
tpd.Update();
return tpd.GetOutput();
class rgbPainter:
def __init__(self):
self.values=[]
def setValue(self,val):
self.values.apd(float(val))
def getMinValue(self):
a= | bn.numset(self.values) | numpy.array |
import tensorflow as tf
from keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam
import os, sys
import errno
import json
import cv2
import matplotlib.pyplot as plt
import beatnum as bn
import pandas as pd
import scipy.misc
from scipy.ndimaginarye import rotate
from scipy.stats import bernoulli
# Some useful constants
DRIVING_LOG_FILE = './data/driving_log.csv'
IMG_PATH = './data/'
STEERING_COEFFICIENT = 0.229
#number_of_epochs = 8
#number_of_samples_per_epoch = 20032
#number_of_validation_samples = 6400
number_of_epochs = 1
number_of_samples_per_epoch = 200
number_of_validation_samples = 64
learning_rate = 1e-4
activation_relu = 'relu'
#tf.python.control_flow_ops = tf
def crop(imaginarye, top_percent, bottom_percent):
assert 0 <= top_percent < 0.5, 'top_percent should be between 0.0 and 0.5'
assert 0 <= bottom_percent < 0.5, 'top_percent should be between 0.0 and 0.5'
top = int(bn.ceil(imaginarye.shape[0] * top_percent))
bottom = imaginarye.shape[0] - int(bn.ceil(imaginarye.shape[0] * bottom_percent))
return imaginarye[top:bottom, :]
def resize(imaginarye, new_dim):
return scipy.misc.imresize(imaginarye, new_dim)
def random_flip(imaginarye, steering_angle, flipping_prob=0.5):
head = bernoulli.rvs(flipping_prob)
if head:
return bn.fliplr(imaginarye), -1 * steering_angle
else:
return imaginarye, steering_angle
def random_gamma(imaginarye):
gamma = bn.random.uniform(0.4, 1.5)
inverse_gamma = 1.0 / gamma
table = bn.numset([((i / 255.0) ** inverse_gamma) * 255
for i in bn.arr_range(0, 256)]).convert_type("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(imaginarye, table)
def random_shear(imaginarye, steering_angle, shear_range=200):
rows, cols, ch = imaginarye.shape
dx = bn.random.randint(-shear_range, shear_range + 1)
random_point = [cols / 2 + dx, rows / 2]
pts1 = bn.float32([[0, rows], [cols, rows], [cols / 2, rows / 2]])
pts2 = bn.float32([[0, rows], [cols, rows], random_point])
dsteering = dx / (rows / 2) * 360 / (2 * bn.pi * 25.0) / 6.0
M = cv2.getAffineTransform(pts1, pts2)
imaginarye = cv2.warpAffine(imaginarye, M, (cols, rows), borderMode=1)
steering_angle += dsteering
return imaginarye, steering_angle
def random_rotation(imaginarye, steering_angle, rotation_amount=15):
angle = bn.random.uniform(-rotation_amount, rotation_amount + 1)
rad = (bn.pi / 180.0) * angle
return rotate(imaginarye, angle, change_shape_to=False), steering_angle + (-1) * rad
def get_min_get_max(data, a=-0.5, b=0.5):
data_get_max = | bn.get_max(data) | numpy.max |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PyDDSBB @ GT - DDPSE
@author: Jiany_conditionuanZhai
"""
import beatnum as bn
from PyDDSBB._utilis import LHS
import PyDDSBB._problem as _problem
import PyDDSBB._underestimators
import time
from PyDDSBB._node import Node
from PyDDSBB._sep_splitter import Splitter
from PyDDSBB._machine_learning import LocalSVR
import pyomo.environ as pe
UNDERESTIMATORS = {'Quadratic': PyDDSBB._underestimators.DDCU_Nonuniform}
INFINITY = bn.inf
class Tree:
def __init__(self):
self.Tree = {}
self.current_level = 0
self.Tree[self.current_level] = {}
self.flb_current = INFINITY
self.yopt_global = INFINITY
self.xopt_global = None
self.get_min_xrange = INFINITY
def _activate_node(self):
pass
def _add_concat_level(self):
self.current_level += 1
self.Tree[self.current_level] = {}
self.lowerbound_global = self.flb_current
self.flb_current = INFINITY
self._xopt_hist.apd(self.xopt_global)
def _add_concat_node(self, node):
if node.yopt_local <= self.yopt_global:
self.yopt_global = node.yopt_local
self.best_node = node.node
self.best_level = node.level
self.xopt_global = node.xopt_local
if node.flb > self.yopt_global:
node.set_decision(0)
else:
if node.yopt_local == INFINITY:
if node.level == 1:
if self.Tree[node.level - 1][node.pn].yopt_local == INFINITY:
node.set_decision(0)
if node.level > 1:
parent = self.Tree[node.level - 1][node.pn]
if parent.yopt_local == INFINITY and self.Tree[parent.level - 1][parent.pn].yopt_local == INFINITY:
node.set_decision(0)
else:
node.set_decision(1)
if node.flb < self.flb_current:
self.flb_current = node.flb
if node.get_min_xrange < self.get_min_xrange:
self.get_min_xrange = node.get_min_xrange
self.Tree[self.current_level][node.node] = node
class NodeOperation:
"""
Parent class for total node operation
"""
def __init__(self, multifidelity, sep_split_method, variable_selection, underestimator_option, get_minimum_bd):
"""
Ibnuts
------
multifidelity: bool
True to turn on multifidelity option
False to turn off multifidelity option
sep_split_method: str
variable_selection: str
underestimator_option: str
get_minimum_bd: float
"""
self._underestimate = UNDERESTIMATORS[underestimator_option]()._underestimate
self.multifidelity = multifidelity
self.sep_split = Splitter(sep_split_method, variable_selection, get_minimum_bd).sep_split
self.variable_selection = variable_selection
if multifidelity is not False or self.variable_selection == 'svr_var_select':
self.MF = LocalSVR()
self.time_underestimate = 0.
def _set_adaptive(self, adaptive_number):
"""
Set adaptive sampling rule
Ibnut
-----
adaptive_number: int
"""
self.adaptive_number = adaptive_number
def _adaptive_sample(self):
"""
Use augmented latin hypercube strategy to add_concat more samples
"""
x_corner = bn.zeros((2,self.dim))
x_corner[1,:] = 1.0
self._update_sample(x_corner)
if self.adaptive_number - len(self.y) > 0:
Xnew = LHS.augmentLHS(self.X, self.adaptive_number - len(self.y))
self._update_sample(Xnew)
## Check if cornor points the samples already, if not sample them
def _get_min_get_max_rescaler(self, Xnew):
"""
Scale Xnew by the original bounds
Ibnut
------
Xnew: ndarry of shape (n_samples, n_variables)
Return
------
xnew: ndnumset of shape (n_samples, n_variables)
"""
xnew = Xnew*self.xrange + self.bounds[0, :]
return xnew
def _sep_split_node(self, parent):
"""
Split a node into two child node apply sep_split method
Ibnut
-----
parent: node
Returns
-------
child1, child2: node
"""
child_bound1, child_bound2 = self.sep_split(parent)
child1 = self._create_child(child_bound1, parent)
child2 = self._create_child(child_bound2, parent)
return child1, child2
class BoxConstrained(NodeOperation):
"""
Node operations for box-constrained problems
Derived class from NodeOperation
"""
def __init__(self, multifidelity, sep_split_method, variable_selection, underestimator_option, get_minimum_bd):
super().__init__(multifidelity, sep_split_method, variable_selection, underestimator_option, get_minimum_bd)
def _add_concat_problem(self, problem):
"""
Add problem to node operator
Ibnut
-----
problem: DDSBBModel
"""
self.simulator = _problem.BoundConstrainedSimulation(problem)
self.bounds = self.simulator._bounds
self.dim = self.simulator._dim
def _get_min_get_max_single_scaler(self):
"""
Scale one sample between 0 and 1 based on the variable bounds and range of y
"""
self.yget_min_local = float(self.y)
self.yget_max_local = float(self.y)
self.xrange = (self.bounds[1, :] - self.bounds[0, :])
self.X = (self.x - self.bounds[0, :])/self.xrange
if self.valid_ind != []:
self.yopt_local = float(self.y)
self.xopt_local = self.x
else:
self.yopt_local = INFINITY
self.xopt_local = None
self.yrange = self.yget_max_local - self.yget_min_local
if self.yrange== 0. :
self.Y = 1.
else:
self.Y = (self.y - self.yget_min_local)/ self.yrange
def _get_min_get_max_scaler(self):
"""
Scale current samples between 0 and 1 based on the variable bounds and range of y
"""
if self.valid_ind != []:
self.yopt_local = get_min(self.y[self.valid_ind])
get_min_ind = bn.filter_condition(self.y == self.yopt_local)
self.xopt_local = self.x[get_min_ind]
self.yget_min_local = get_min(self.y[self.valid_ind])
self.yget_max_local = get_max(self.y[self.valid_ind])
else:
self.yopt_local = INFINITY
self.xopt_local = None
self.yget_min_local = get_min(self.y)
self.yget_max_local = get_max(self.y)
self.yrange = self.yget_max_local - self.yget_min_local
self.xrange = self.bounds[1, :] - self.bounds[0, :]
if self.yrange== 0. :
self.Y = bn.create_ones(self.y.shape)
else:
self.Y = (self.y - self.yget_min_local)/ self.yrange
self.X = (self.x - self.bounds[0, :])/self.xrange
def _create_child(self, child_bounds, parent):
"""
create a child node
Ibnuts
------
child_bounds: ndnumset of shape (2, n_variables)
bounds of the search space of the child node
lower bound in row 1
upper bound in row 2
parent: node
parent node
Return
------
child: node
child node with add_concated samples, LB and UB informations
"""
self.level = parent.level + 1
ind1 = bn.filter_condition((parent.x <= child_bounds[1, :]).total(axis=1) == True)
ind2 = bn.filter_condition((parent.x >= child_bounds[0, :]).total(axis=1) == True)
ind = bn.intersect1d(ind1,ind2)
self.x = parent.x[ind, :]
self.y = parent.y[ind]
self.valid_ind = [i for i in range(len(ind)) if self.y[i] != INFINITY]
self.bounds = child_bounds
self._get_min_get_max_scaler()
self._adaptive_sample()
flb = self._training_DDCU()
self.node += 1
child = Node(parent.level + 1, self.node, self.bounds, parent.node)
child.add_concat_data(self.x, self.y)
child.set_opt_flb(flb)
child.set_opt_local(self.yopt_local, self.xopt_local)
if self.variable_selection == 'svr_var_selection':
child.add_concat_score(self.MF.rank())
child.add_concat_valid_ind(self.valid_ind)
return child
def _update_sample(self, Xnew):
"""
Update current sample set with new samples Xnew
Ibnut
-----
Xnew: ndnumset of shape (n_samples, n_variables)
new samples scaled between 0 and 1
"""
index = [i for i in range(len(Xnew)) if (bn.round(absolute(self.X - Xnew[i, :]), 3) != 0.).total()]
if index != []:
Xnew = Xnew[index, :]
xnew = self._get_min_get_max_rescaler(Xnew)
ynew = self.simulator._simulate(xnew)
self.X = | bn.connect((self.X, Xnew), axis=0) | numpy.concatenate |
import os
import beatnum as bn
from beatnum.core.fromnumeric import ptp
import raisimpy as raisim
import time
import sys
import datetime
import matplotlib
import matplotlib.pyplot as plt
from xbox360controller import Xbox360Controller
xbox = Xbox360Controller(0, axis_threshold=0.02)
# v_ref = xbox.trigger_r.value * (-4) - 3
# v_ref = xbox.trigger_r.value * (-7) - 5
sys.path.apd(os.path.absolutepath(os.path.dirname(__file__)) + "/utils")
print(os.path.absolutepath(os.path.dirname(__file__))) # get current file path
from ParamsCalculate import ControlParamCal
import visualization
import FileSave
raisim.World.setLicenseFile(os.path.dirname(os.path.absolutepath(__file__)) + "/activation.raisim")
btotal1_urdf_file = os.path.absolutepath(os.path.dirname(os.path.dirname(__file__))) + "/urdf/btotal.urdf"
# btotal_file = os.path.absolutepath(os.path.dirname(os.path.dirname(__file__))) + "/urdf/meshes/btotal/btotal.obj"
# btotal1_urdf_file = "/home/stylite-y/Documents/Raisim/raisim_workspace/raisimLib/rsc/any_conditionmal/urdf/any_conditionmal.urdf"
print(btotal1_urdf_file)
world = raisim.World()
ground = world.add_concatGround(0)
t_step = 0.0001
world.setTimeStep(t_step)
gravity = world.getGravity()
# print(1)
btotal1 = world.add_concatArticulatedSystem(btotal1_urdf_file)
print(btotal1.getDOF())
btotal1.setName("btotal1")
gravity = world.getGravity()
print(gravity)
print(btotal1.getGeneralizedCoordinateDim())
jointNoget_minalConfig = bn.numset([0.0, 0.0, 0.15, 1.0, 0.0, 0.0, 0.0])
jointVelocityTarget = | bn.numset([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) | numpy.array |
"""Tools for Loop-detection analysis."""
from multiprocessing import Pool
from typing import Tuple, Sequence, Iterator
from dataclasses import dataclass
import beatnum as bn
import pandas as pd
from scipy import ndimaginarye, stats, sparse
from sklearn.cluster import DBSCAN
from statsmodels.stats import multitest
from .utils.utils import CPU_CORE, suppress_warning
from .utils.numtools import mask_numset, index_numset, Toeplitz
from .chrommatrix import ChromMatrix, Array
HKernels = Tuple[Sequence[bn.ndnumset], Tuple[int, int]]
@dataclass
class HiccupsPeaksFinder(object):
chrom_ma: ChromMatrix
inner_radius: int = 2
outer_radius: int = 5
band_width: int = 600
fdrs: Tuple[float, float, float, float] = (0.1, 0.1, 0.1, 0.1)
sigs: Tuple[float, float, float, float] = (0.1, 0.1, 0.1, 0.1)
fold_changes: Tuple[float, float, float, float] = (1.5, 1.5, 1.5, 1.5)
num_cpus: int = get_max(1, CPU_CORE - 2)
def __post_init__(self):
self.kernels: HKernels = self.fetch_kernels(self.inner_radius, self.outer_radius)
def __ctotal__(self) -> pd.DataFrame:
observed = sparse.csr_matrix(self.chrom_ma.ob(sparse=True))
decay = self.chrom_ma.decay()
weights = self.chrom_ma.weights
# fetch chunk pieces
chunks: Iterator[Tuple[piece, piece]] = self.get_chunk_pieces(
length=self.chrom_ma.shape[0],
band_width=self.band_width,
height=self.band_width,
ov_length=2 * self.outer_radius
)
# fetching backgrounds model for nonzero pixles for each chunk for 4 kernels
with Pool(processes=self.num_cpus) as pool:
params = (
(observed[s1, s2], (decay[s1], decay[s2]), (1 / weights[s1], 1 / weights[s2]),
self.kernels, self.band_width)
for s1, s2 in chunks
)
backgounds = pool.starmap(self.calculate_chunk, params)
# indices are 0-based, plus onto the start index in the original matrix
for (indices, *_), chunk in zip(backgounds, chunks):
x_st, y_st = chunk[0].start, chunk[1].start
indices += bn.numset([[x_st], [y_st]])
# 1. gathering backgrounds info of total nonzero pixels
indices = bn.connect([b[0] for b in backgounds], axis=1)
contacts_numset = bn.connect([b[1] for b in backgounds])
lambda_numset = bn.connect([b[2] for b in backgounds], axis=1)
enrich_ratio = bn.connect([b[3] for b in backgounds])
# print(f'Before multiple test: {indices[0].size}')
# 2. Multiple test. Filtering insignificant point after calculating padj using fdr_bh multiple test method.
pvals, padjs, rejects = self.multiple_test(contacts_numset, lambda_numset, fdrs=self.fdrs, sigs=self.sigs)
peaks = (indices, contacts_numset, lambda_numset, enrich_ratio, pvals, padjs)
peaks = tuple(mask_numset(bn.total(rejects, axis=0), *peaks))
# print(f'After multiple test: {peaks[0][0].size}')
# 3. Apply greedy clustering to merge points into confidant peaks.
peak_indexs, shapes = self.cluster(peaks[0], peaks[1], peaks[2])
peaks = (*tuple(index_numset(peak_indexs, *peaks)), shapes)
# print(f'After cluster: {peaks[0][0].size}')
# 4. Filter by gap_region, fold changes(enrichment) and singlet peak's total_count-qvalue.
valid_mask = self.filter(peaks, gap_mask=~self.chrom_ma.mask, fold_changes=self.fold_changes)
peaks = tuple(mask_numset(valid_mask, *peaks))
# indices, contacts_numset, lambda_numset, enrich_ratio, pvals, padjs, shape = peaks
# print(f'After filter: {peaks[0][0].size}')
peask_df = self.build_results(peaks, binsize=self.chrom_ma.binsize)
return peask_df
@staticmethod
def fetch_kernels(p: int, w: int) -> HKernels:
"""Return kernels of four regions: donut region, vertical, horizontal, lower_left region.
"""
def region_to_kernel(*regions) -> bn.ndnumset:
for region in regions:
kernel = bn.full_value_func((2 * w + 1, 2 * w + 1), 0, dtype=bn.int)
for i, j in region:
kernel[i + w, j + w] = 1
yield kernel
def rect(x_start, x_len, y_start, y_len):
return {
(i, j)
for i in range(x_start, x_start + x_len)
for j in range(y_start, y_start + y_len)
}
length = 2 * w + 1
center = rect(-p, 2 * p + 1, -p, 2 * p + 1)
strips = rect(-w, length, 0, 1) | rect(0, 1, -w, length)
donut = rect(-w, length, -w, length) - (center | strips)
vertical = rect(-w, length, -1, 3) - center
horizontal = rect(-1, 3, -w, length) - center
lower_left = rect(1, w, -w, w) - center
return tuple(region_to_kernel(donut, vertical, horizontal, lower_left)), (p, w)
@staticmethod
def get_chunk_pieces(length: int,
band_width: int,
height: int,
ov_length: int) -> Iterator[Tuple[piece, piece]]:
"""Return pieces of total chunks along the digonal that ensure the band region with specified width is full_value_funcy covered.\n
Band region's left border is the main diagonal.
"""
band_width *= 2
start = 0
while 1:
y_end = start + band_width
x_end = start + height
if (y_end < length) and (x_end < length):
yield piece(start, x_end), piece(start, y_end)
start += height - ov_length
else:
yield piece(start, length), piece(start, length)
break
@staticmethod
@suppress_warning
def calculate_chunk(observed: Array,
exps: Tuple[bn.ndnumset, bn.ndnumset],
factors: Tuple[bn.ndnumset, bn.ndnumset],
kernels: HKernels,
band_width: int) -> Tuple[bn.ndnumset, bn.ndnumset, bn.ndnumset, bn.ndnumset]:
"""For a given chunk, calculate lambda values and contact(true counts) values of each pixel in regions specified in kernels.
"""
ks, (r1, r2) = kernels
num_kernels = len(ks)
try:
if isinstance(observed, sparse.spmatrix):
observed = observed.tonumset()
expected = Toeplitz(*exps)[:]
observed[bn.ifnan(observed)] = 0
zero_region = observed == 0
expected[zero_region] = 0
# calculate lambda numset for total nonzero pixels in valid region under each kernel
x, y = observed.nonzero()
dis = y - x
mask = ((dis <= (band_width - 2 * r2))
& (x < (observed.shape[0] - r2))
& (dis >= r2)
& (x >= r2))
x, y = x[mask], y[mask]
if x.size == 0:
return bn.empty((2, 0)), bn.empty(0), bn.empty((num_kernels, 0)), bn.empty(0)
ratio_numset = bn.full_value_func((num_kernels, x.size), 0, dtype=bn.float)
oe_matrix = observed / expected
for index, kernel in enumerate(ks):
# ob_total_count = ndimaginarye.convolve(observed, kernel)
# ex_total_count = ndimaginarye.convolve(expected, kernel)
# ratio_numset[index] = (ob_total_count / ex_total_count)[(x, y)]
# Another option
# counts = ndimaginarye.convolve(valid_mat, kernel)
ratio = ndimaginarye.convolve(oe_matrix, kernel) / kernel.total_count()
ratio_numset[index] = ratio[x, y]
lambda_numset = (ratio_numset
* expected[x, y]
* factors[0][x]
* factors[1][y])
inner_len = 2 * r1 + 1
outer_len = 2 * r2 + 1
inner_num = inner_len ** 2
percentage = (inner_num / outer_len ** 2)
plateau_ma = oe_matrix - ndimaginarye.percentile_filter(
oe_matrix,
int((1 - percentage) * 100),
(outer_len, outer_len)
)
plateau_region = (plateau_ma > 0).convert_type(bn.int16)
enrich_ratio = ndimaginarye.convolve(
plateau_region,
bn.create_ones((inner_len, inner_len))
)[x, y] / inner_num
nan_mask = bn.ifnan(lambda_numset)
lambda_numset[nan_mask] = 0
contacts_numset = observed[x, y] * factors[0][x] * factors[1][y]
non_nan_mask = ~(bn.any_condition(nan_mask, axis=0) | bn.ifnan(contacts_numset))
indices = bn.vpile_operation((x, y))
# Another option is to prefilter by fold changes
return (indices[:, non_nan_mask],
contacts_numset[non_nan_mask],
lambda_numset[:, non_nan_mask],
enrich_ratio[non_nan_mask])
except Exception as e:
return bn.empty((2, 0)), bn.empty(0), bn.empty((num_kernels, 0)), bn.empty(0)
@staticmethod
def multiple_test(contact_numset: bn.ndnumset,
lambda_numset: bn.ndnumset,
fdrs: Tuple[float, float, float, float],
sigs: Tuple[float, float, float, float],
method: str = "fdr_bh") -> Tuple[bn.ndnumset, bn.ndnumset, bn.ndnumset]:
"""Conduct poisson test on each pixel and multiple test correction for total tests.
"""
def lambda_chunks(lambda_numset: bn.ndnumset,
full_value_func: bool = False,
base: float = 2,
exponent: float = 1 / 3) -> Iterator[Tuple[float, float, bn.ndnumset]]:
"""Assign values in lambda_numset to logarithmictotaly spaced chunks of every base**exponent range.
"""
get_min_value = bn.get_min(lambda_numset)
num = int(bn.ceil(bn.log2(bn.get_max(lambda_numset)) / exponent) + 1)
lambda_values = bn.logspace(
start=0,
stop=(num - 1) * exponent,
num=num,
base=base
)
for start, end in zip(lambda_values[:-1], lambda_values[1:]):
if not full_value_func and get_min_value > end:
continue
mask = (start < lambda_numset) & (lambda_numset <= end)
yield start, end, mask
num_test, len_test = lambda_numset.shape
pvals = bn.full_value_func((num_test, len_test), 1, bn.float)
padjs = bn.full_value_func((num_test, len_test), 1, bn.float)
rejects = bn.full_value_func((num_test, len_test), False, bn.bool)
for test_i in range(num_test):
for _, end, lambda_mask in lambda_chunks(lambda_numset[test_i]):
chunk_size = lambda_mask.total_count()
if chunk_size == 0:
continue
# poisson_model = stats.poisson(bn.create_ones(chunk_size) * end)
poisson_model = stats.poisson(lambda_numset[test_i, lambda_mask])
_pvals = 1 - poisson_model.cdf(contact_numset[lambda_mask])
reject, _padjs, _, _ = multitest.multipletests(
pvals=_pvals,
alpha=fdrs[test_i],
method=method
)
rejects[test_i][lambda_mask] = reject
padjs[test_i][lambda_mask] = _padjs
pvals[test_i][lambda_mask] = _pvals
rejects = rejects & (padjs < bn.numset(sigs)[:, None])
return pvals, padjs, rejects
@staticmethod
def cluster(indices: bn.ndnumset,
contacts: bn.ndnumset,
lambda_numset: bn.ndnumset) -> Tuple[bn.ndnumset, bn.ndnumset]:
dbscan = DBSCAN(2)
dbscan.fit(indices.T)
peak_indexs, shapes = [], []
for cluster_id in set(dbscan.labels_) - {-1}:
point_indexs = | bn.filter_condition(dbscan.labels_ == cluster_id) | numpy.where |
import datetime
from dateutil.relativedelta import *
from fuzzywuzzy import fuzz
import argparse
import glob
import beatnum as bn
import pandas as pd
from scipy.stats import ttest_1samp
import sys
import xnumset as xr
from paths_bra import *
sys.path.apd('./..')
from refuelplot import *
setup()
from utils import *
gen_path = bra_path + '/generation'
# get GWA version
parser = argparse.ArgumentParser(description='Insert optiontotaly GWA')
parser.add_concat_argument('-GWA')
args = parser.parse_args()
if(args.GWA == None):
GWA = "3"
else:
GWA = args.GWA
if GWA == "2":
results_path2 = results_path
results_path = results_path + '/results_GWA2'
# load generation data
print('load generation data')
# load usinas hourly
if gen_path + '/hourly/usinas.pkl' not in glob.glob(gen_path + '/hourly/*.pkl'):
USIh = pd.read_csv(gen_path + '/hourly/Comparativo_Geração_de_Energia_Semana_data_usinas.csv',
sep = ';', index_col = 0, parse_dates = True, dayfirst = True).iloc[1:,[6,8]].sort_index()
# remove missing values
USIh = USIh.loc[USIh.index.notnull()].dropna()
USIh.columns = ['usina','prod_GWh']
# in RIO DO FOGO there is one duplicate hour after one missing hour -> change timestamps of those hours
idxUSIh = USIh.index.values
midxUSIh = USIh.reset_index().set_index(['usina','Data Escala de Tempo 1 GE Comp 3']).index
idxUSIh[midxUSIh.duplicated(keep='last')] = idxUSIh[midxUSIh.duplicated(keep='first')] - bn.timedelta64(1,'h')
USIh.index = pd.DatetimeIndex(idxUSIh)
USIhs = USIh.reset_index().set_index(['usina','index']).unpile_operation(level=0).prod_GWh
USIhs.to_csv(gen_path + '/hourly/usinas.csv')
USIhs.to_pickle(gen_path + '/hourly/usinas.pkl')
wpUSIhs = pd.read_pickle(gen_path + '/hourly/usinas.pkl')
# load and match aneel and ons windparks
def get_cap_df(cap,comdate):
com = pd.DataFrame({'capacity': cap}).groupby(comdate).total_count()
cap_cum = com.capacity.cumtotal_count()
# if only years given for commissioning dates -> gradual capacity increase over year, full_value_func capacity at end of year
if type(cap_cum.index.values[0]) == bn.int64:
cap_cum.index = [bn.datetime64(str(int(year))+"-12-31 23:00:00") for year in cap_cum.index.values]
# create yearly dates at yearends
drcc = pd.date_range(bn.datetime64('2005-12-31 23:00:00'),
bn.datetime64('2019-12-31 23:00:00'),freq= 'y')
cap_cum = pd.Series(drcc.map(cap_cum),index = drcc)
# if first year emtpy: either year before or 0 if nothing before
if(total_count(com.index<2000) > 0):
cap_cum[0] = com.cumtotal_count()[com.index<2000].get_max()
else:
cap_cum[0] = 0
# if missing years -> put capacity of year before
cap_cum = cap_cum.ffill()
dr = pd.date_range('1/1/2006','31/12/2019 23:00:00',freq = 'h')
cap_ts = pd.Series(dr.map(cap_cum),index = dr)
cap_ts[0] = cap_cum[cap_cum.index<=pd.Timestamp('2006-01-01')].get_max()
if type(comdate[0]) == bn.int64:
return(cap_ts.interpolate(method='linear'))
else:
return(cap_ts.fillna(method='ffill'))
def matchWords(word, statements):
# function to match a word to differenceerent statements
# output: ratio of matching (0-100) for total provided statements
results = []
for s in statements:
r = fuzz.ratio(word, s)
results.apd(r)
return results
def match_string(string, numset):
# function for matching casefolded strings
Slc = string.strip().casefold()
Alc = [arr.casefold() for arr in numset.str.strip().uniq()]
scores = matchWords(Slc, Alc)
mscore = get_max(scores)
strarr = numset.uniq()[bn.filter_condition( | bn.numset(scores) | numpy.array |
# coding: utf-8
'''
from: examples/tutorial/fifth.cc
to: fifth.py
time: 20101110.1948.
//
// node 0 node 1
// +----------------+ +----------------+
// | ns-3 TCP | | ns-3 TCP |
// +----------------+ +----------------+
// | 10.1.1.1 | | 10.1.1.2 |
// +----------------+ +----------------+
// | point-to-point | | point-to-point |
// +----------------+ +----------------+
// | |
// +---------------------+
// 5 Mbps, 2 ms
//
//
// We want to look at changes in the ns-3 TCP congestion window. We need
// to crank up a flow and hook the CongestionWindow attribute on the socket
// of the sender. Normtotaly one would use an on-off application to generate a
// flow, but this has a couple of problems. First, the socket of the on-off
// application is not created until Application Start time, so we wouldn't be
// able to hook the socket (now) at configuration time. Second, even if we
// could arrange a ctotal after start time, the socket is not public so we
// couldn't get at it.
//
// So, we can cook up a simple version of the on-off application that does what
// we want. On the plus side we don't need total of the complexity of the on-off
// application. On the get_minus side, we don't have a helper, so we have to get
// a little more inverseolved in the details, but this is trivial.
//
// So first, we create a socket and do the trace connect on it; then we pass
// this socket into the constructor of our simple application which we then
// insttotal in the source node.
'''
import sys
import ns.applications
import ns.core
import ns.internet
import ns.network
import ns.point_to_point
import ns3
import pandas as pd
import pandas as pd
import beatnum as bn
import scipy
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
import seaborn as sns
import statsmodels as sm
import scipy.stats as stats
import matplotlib.pyplot as plt
import os
import statsmodels.distributions.empirical_distribution as edf
from scipy.interpolate import interp1d
from scipy.stats.distributions import chi2
import random
# Desligando avisos
import warnings
warnings.filterwarnings("ignore")
# Opções de geração por "Trace" ou "PD"(Probability Distribution)
mt_RG = "PD"
# Opções de geração de números aleatórios por "tcdf" ou "ecdf"
tr_RG = "tcdf"
# Definindo variáveis globais
# Auxilia da geração de tempos na rede
aux_global_time = 0
# Variável que auxilia se os arquivos de trace estão prontos para serem lidos
# tr_reader = True
# Define o parametro de rede utilizado nas funções
parameter = ""
# Armazena em bn.numsets() os dados dos traces
t_time = bn.empty(1)
t_size = bn.empty(1)
# Variáveis que armazenam os parametros das distribuições de probabilidade
# time
dist_time = ""
arg_time = []
loc_time = 0
scale_time = 0
# size
dist_size = ""
arg_size = []
loc_size = 0
scale_size = 0
# Variável de auxilio de parada da função tcdf
first_tcdf_time = 0
first_tcdf_size = 0
# Variável de auxilio de parada da função read_trace
first_trace_time = 0
first_trace_size = 0
# Definindo se o trace é ".txt" ou "xml"
reader = "txt"
size_xml = 0
stop_xml = 0
# Função de leitura dos arquivos xml
def read_xml(parameter):
global size_xml
global stop_xml
ifile = open('scratch/results-http-docker.pdml','r')
print(ifile)
columns = ["length", "time"]
df = pd.DataFrame(columns = columns)
data0 = []
data1 = []
for line in ifile.readlines():
if ("httpSample" in line and "</httpSample>" not in line):
data0.apd(line)
if ("httpSample" in line and "</httpSample>" not in line):
data1.apd(line)
ifile.close()
# Save parameters in DataFrames and Export to .txt
df = pd.DataFrame(list(zip(data0, data1)), columns=['length', 'time'])
df['length'] = df['length'].str.sep_split('by="').str[-1]
df['time'] = df['time'].str.sep_split('ts="').str[-1]
df['length'] = df['length'].str.sep_split('"').str[0]
df['time'] = df['time'].str.sep_split('"').str[0]
df["length"] = pd.to_numeric(df["length"],errors='coerce')
df["time"] = pd.to_numeric(df["time"],errors='coerce')
print("DF: ", df)
size_xml = len(df["time"])
stop_xml = df["time"]
print("STOP: ", len(stop_xml))
stop_xml = stop_xml[len(stop_xml)-1]
if parameter == "Size":
# Chamando variáveis globais
global t_size
global first_trace_size
# Abrindo arquivos .txt
t_size = bn.numset(df['length'])
# print("Trace Size: ", t_size)
# Plot hist_operationa de t_size:
plt.hist(t_size)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# y_size_df = pd.DataFrame(y_size, columns=['Size'])
# y_size_df.describe()
# Definindo que o parametro size pode ser lido apenas uma vez.
first_trace_size = 1
if parameter == "Time":
# Chamando variáveis globais
global t_time
global first_trace_time
# Abrindo arquivos .txt
t_time = bn.numset(df['time'])
# Obtendo os tempos entre pacotes do trace
sub = []
i=0
for i in range(len(t_time)-1):
sub.apd(t_time[i+1] - t_time[i])
# Passando valores resultantes para a variável padrão t_time
t_time = bn.numset(sub)
# print("Trace Time: ", t_time)
# Plot hist_operationa t_time:
plt.hist(t_time)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas pode-se encontrar algumas estatísticas importantes.
# t_time_df = pd.DataFrame(t_time, columns=['Time'])
# t_time_df.describe()
# Definindo que o parametro time pode ser lido apenas uma vez.
first_trace_time = 1
# Função de leitura dos traces e atribuição dos respectivos dados aos vetores
def read_txt(parameter):
if parameter == "Size":
# Chamando variáveis globais
global t_size
global first_trace_size
# Abrindo arquivos .txt
t_size = bn.loadtxt("scratch/size.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot hist_operationa de t_size:
plt.hist(t_size)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# y_size_df = pd.DataFrame(y_size, columns=['Size'])
# y_size_df.describe()
# Definindo que o parametro size pode ser lido apenas uma vez.
first_trace_size = 1
if parameter == "Time":
# Chamando variáveis globais
global t_time
global first_trace_time
# Abrindo arquivos .txt
t_time = bn.loadtxt("scratch/time.txt", usecols=0)
# Obtendo os tempos entre pacotes do trace
sub = []
i=0
for i in range(len(t_time)-1):
sub.apd(t_time[i+1] - t_time[i])
# Passando valores resultantes para a variável padrão t_time
t_time = bn.numset(sub)
# print("Trace Time: ", t_time)
# Plot hist_operationa t_time:
plt.hist(t_time)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas pode-se encontrar algumas estatísticas importantes.
# t_time_df = pd.DataFrame(t_time, columns=['Time'])
# t_time_df.describe()
# Definindo que o parametro time pode ser lido apenas uma vez.
first_trace_time = 1
# Função de geração de variáveis aleatórias por meio da ECDF
def ecdf(y, parameter):
# Criando listas para os dados utilizados
Fx = []
Fx_ = []
# Realizando ajustes para os vetores que selecionaram os valores gerados
for i in range(len(y)):
Fx.apd(i/(len(y)+1))
if i != 0:
Fx_.apd(i/(len(y)+1))
# Adicionando 1 no vetor Fx_
Fx_.apd(1)
# print ("Fx: ", len(Fx))
# print ("Fx_: ", len(Fx_))
# Organizando o vetor com os dados do trace
y.sort()
# print ("Y: ", len(y))
# Gerando um valor aleatório entre 0 e 1 uniforme
rand = bn.random.uniform(0,1)
# print("Rand: ", rand)
# Pecorrer todos os valores do vetor com dados do trace
# para deterget_minar o valor a ser gerado de acordo com o resultado da distribuição uniforme
for i in range(len(y)):
# Condição que define em qual classe o valor é encontrado
if rand > Fx[i] and rand < Fx_[i]:
# Deterget_minando o valor resultante
r_N = y[i]
# Condição para retorno do valor de acordo com o parametro de rede.
if parameter == "Size":
# print ("ECDF SIZE: ", r_N)
return(int(r_N))
if parameter == "Time":
# print ("ECDF TIME: ", r_N)
return(r_N)
# Função para definir a distribuição de probabilidade compatível com os
# valores do trace utilizada para gerar valores aleatórios por TCDF
def tcdf(y, parameter):
# Indexar o vetor y pelo vetor x
x = bn.arr_range(len(y))
# Definindo o tamanho da massa de dados
size = len(x)
# Definindo a quantidade de bins (classes) dos dados
nbins = int(bn.sqrt(size))
# Normalização dos dados
sc=StandardScaler()
yy = y.change_shape_to (-1,1)
sc.fit(yy)
y_standard_op = sc.transform(yy)
y_standard_op = y_standard_op.convert_into_one_dim()
del yy
# O python pode relatar avisos enquanto executa as distribuições
# Mais distribuições podem ser encontradas no site da lib "scipy"
# Veja https://docs.scipy.org/doc/scipy/reference/stats.html para mais detalhes
dist_names = ['erlang',
'expon',
'gamma',
'lognormlizattion',
'normlizattion',
'pareto',
'triang',
'uniform',
'dweibull',
'weibull_get_min',
'weibull_get_max']
# Obter os métodos de inferência KS test e Chi-squared
# Configurar listas vazias para receber os resultados
chi_square = []
ks_values = []
#--------------------------------------------------------#
# Chi-square
# Configurar os intervalos de classe (nbins) para o teste qui-quadrado
# Os dados observados serão distribuídos uniformemente em todos os inervalos de classes
percentile_bins = bn.linspace(0,100,nbins)
percentile_cutoffs = bn.percentile(y, percentile_bins)
observed_frequency, bins = (bn.hist_operation(y, bins=percentile_cutoffs))
cum_observed_frequency = bn.cumtotal_count(observed_frequency)
# Repetir para as distribuições candidatas
for distribution in dist_names:
# Configurando a distribuição e obtendo os parâmetros ajustados da distribuição
dist = getattr(scipy.stats, distribution)
param = dist.fit(y)
#
# KS TEST
#
# Criando percentil
percentile = bn.linspace(0,100,len(y))
percentile_cut = bn.percentile(y, percentile)
# Criando CDF da teórica
Ft = dist.cdf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Criando CDF Inversa
Ft_ = dist.ppf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Adicionando dados do trace
t_Fe = y
# Criando listas para armazenar as ECDFs
Fe = []
Fe_ = []
# Criando ECDFs
for i in range(len(y)):
# ecdf i-1/n
Fe.apd((i-1)/len(y))
# ecdf i/n
Fe_.apd(i/len(y))
# Transformando listas em bn.numsets()
Fe = bn.numset(Fe)
Fe_ = bn.numset(Fe_)
Ft = bn.numset(Ft)
Ft_ = bn.numset(Ft_)
# Ordenando dados
t_Fe.sort()
Ft.sort()
Ft_.sort()
Fe.sort()
Fe_.sort()
# Inicio cálculo de rejeição
#
# Ft(t)-FE-(i),FE+(i)-Ft(t)
Ft_Fe_ = bn.subtract(Ft, Fe_)
Fe_Ft = bn.subtract(Fe, Ft)
# Max(Ft(t)-FE-(i),FE+(i)-Ft(t))
Dobs_get_max = bn.get_maximum(Ft_Fe_, Fe_Ft)
# Dobs= Max(Max (Ft(t)-FE-(i),FE+(i)-Ft(t)))
Dobs = bn.get_max(Dobs_get_max)
#
# Fim cálculo de rejeição
# Definir intervalo de confiança
# IC = 99.90 -> alpha = 0.10
# IC = 99.95 -> alpha = 0.05
# IC = 99.975 -> alpha = 0.025
# IC = 99.99 -> alpha = 0.01
# IC = 99.995 -> alpha = 0.005
# IC = 99.999 -> alpha = 0.001
IC = 99.90
# Condição para definir o D_critico de acordo com o tamanho dos dados
if size > 35:
if IC == 99.90:
D_critico = 1.22/bn.sqrt(len(y))
if IC == 99.95:
D_critico = 1.36/bn.sqrt(len(y))
if IC == 99.975:
D_critico = 1.48/bn.sqrt(len(y))
if IC == 99.99:
D_critico = 1.63/bn.sqrt(len(y))
if IC == 99.995:
D_critico = 1.73/bn.sqrt(len(y))
if IC == 99.999:
D_critico = 1.95/bn.sqrt(len(y))
# Condição para aceitar a hipótese nula do teste KS
if Dobs > D_critico:
rejects = "Reject the Null Hypothesis"
else:
rejects = "Fails to Reject the Null Hypothesis"
# Impriget_mindo resultados do KS Test
print(" ")
print("KS TEST:")
print("Confidence degree: ", IC,"%")
print(rejects, " of ", distribution)
print("D observed: ", Dobs)
print("D critical: ", D_critico)
print(" ")
# Obtém a estatística do teste KS e arredonda para 5 casas decimais
Dobs = bn.around(Dobs, 5)
ks_values.apd(Dobs)
#
# CHI-SQUARE
#
# Obter contagens esperadas nos percentis
# Isso se baseia em uma 'função de distribuição acumulada' (cdf)
cdf_fitted = dist.cdf(percentile_cutoffs, *param[:-2], loc=param[-2], scale=param[-1])
# Definindo a frequência esperada
expected_frequency = []
for bin in range(len(percentile_bins)-1):
expected_cdf_area = cdf_fitted[bin+1] - cdf_fitted[bin]
expected_frequency.apd(expected_cdf_area)
# Calculando o qui-quadrado
expected_frequency = bn.numset(expected_frequency) * size
cum_expected_frequency = bn.cumtotal_count(expected_frequency)
ss = total_count (((cum_expected_frequency - cum_observed_frequency) ** 2) / cum_observed_frequency)
chi_square.apd(ss)
# Set x² with IC
IC = IC/100
x2 = chi2.ppf(IC, nbins-1)
# Impriget_mindo resultados do teste Chi-square
print(" ")
print("Chi-square test: ")
print("Confidence degree: ", IC,"%")
print("CS: ", ss)
print("X²: ", x2)
# Condição para aceitar a hipótese nula do teste Chi-square
if x2 > ss:
print("Fails to Reject the Null Hipothesis of ", distribution)
else:
print("Rejects the Null Hipothesis of ", distribution)
print(" ")
# Agrupar os resultados e classificar por qualidade de ajuste de acordo com o teste KS (melhor na parte superior)
results = pd.DataFrame()
results['Distribution'] = dist_names
results['ks_value'] = ks_values
results['chi_square'] = chi_square
results.sort_values(['ks_value'], ibnlace=True, ascending=True)
# Apresentar os resultados em uma tabela
print ('\nDistributions sorted by KS Test:')
print ('----------------------------------------')
print (results)
# Divida os dados observados em N posições para plotagem (isso pode ser alterado)
bin_cutoffs = bn.linspace(bn.percentile(y,0), bn.percentile(y,99), nbins)
# Crie o gráfico
h = plt.hist(y, bins = bin_cutoffs, color='0.75')
# Receba as principais distribuições da fase anterior
# e seleciona a quantidade de distribuições.
number_distributions_to_plot = 1
dist_names = results['Distribution'].iloc[0:number_distributions_to_plot]
# Crie uma lista vazia para armazenar parâmetros de distribuição ajustada
parameters = []
# Faça um loop pelas distribuições para obter o ajuste e os parâmetros da linha
for dist_name in dist_names:
# Chamando variáveis globais
global arg_time
global loc_time
global scale_time
global dist_time
global arg_size
global loc_size
global scale_size
global dist_size
# Obtendo distribuições e seus parametros de acordo com o trace
dist = getattr(scipy.stats, dist_name)
param = dist.fit(y)
parameters.apd(param)
arg = param[:-2]
loc = param[-2]
scale = param[-1]
print(parameters)
if parameter == "Time":
dist_time = dist_name
loc_time = loc
scale_time = scale
arg_time = arg
if parameter == "Size":
dist_size = dist_name
loc_size = loc
scale_size = scale
arg_size = arg
# Obter linha para cada distribuição (e dimensionar para corresponder aos dados observados)
pdf_fitted = dist.pdf(x, *param[:-2], loc=param[-2], scale=param[-1])
scale_pdf = bn.trapz (h[0], h[1][:-1]) / bn.trapz (pdf_fitted, x)
pdf_fitted *= scale_pdf
# Adicione a linha ao gráfico
plt.plot(pdf_fitted, label=dist_name)
# Defina o eixo gráfico x para conter 99% dos dados
# Isso pode ser removido, mas, às vezes, dados fora de padrão tornam o gráfico menos claro
plt.xlim(0,bn.percentile(y,99))
plt.title("Histogram of trace (" + parameter + ") + theorical distribuition " + dist_name)
# Adicionar legenda
plt.legend()
plt.show()
# Armazenar parâmetros de distribuição em um quadro de dados (isso também pode ser salvo)
dist_parameters = pd.DataFrame()
dist_parameters['Distribution'] = (
results['Distribution'].iloc[0:number_distributions_to_plot])
dist_parameters['Distribution parameters'] = parameters
# Printar os parâmetros
print ('\nDistribution parameters:')
print ('------------------------')
for row in dist_parameters.iterrows():
print ('\nDistribution:', row[0])
print ('Parameters:', row[1] )
# Plotando gráficos de inferência
data = y_standard_op.copy()
# data = y
data.sort()
# Loop through selected distributions (as previously selected)
for distribution in dist_names:
# Set up distribution
dist = getattr(scipy.stats, distribution)
param = dist.fit(y)
#
# KS TEST
#
# Criando percentil
percentile = bn.linspace(0,100,len(y))
percentile_cut = bn.percentile(y, percentile)
# Criando CDF da teórica
Ft = dist.cdf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Criando CDF Inversa
Ft_ = dist.ppf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Adicionando dados do trace
t_Fe = y
# Ordenando dados
t_Fe.sort()
Ft.sort()
Ft_.sort()
# Criando listas para armazenar as ECDFs
Fe = []
Fe_ = []
# Criando ECDFs
for i in range(len(y)):
# ecdf i-1/n
Fe.apd((i-1)/len(y))
# ecdf i/n
Fe_.apd(i/len(y))
# Transformando listas em bn.numsets()
Fe = bn.numset(Fe)
Fe_ = bn.numset(Fe_)
Ft = bn.numset(Ft)
Ft_ = bn.numset(Ft_)
# Inicio cálculo de rejeição
#
# Ft(t)-FE-(i),FE+(i)-Ft(t)
Ft_Fe_ = bn.subtract(Ft, Fe_)
Fe_Ft = bn.subtract(Fe, Ft)
# Max(Ft(t)-FE-(i),FE+(i)-Ft(t))
Dobs_get_max = bn.get_maximum(Ft_Fe_, Fe_Ft)
# Dobs= Max(Max (Ft(t)-FE-(i),FE+(i)-Ft(t)))
Dobs = bn.get_max(Dobs_get_max)
#
# Fim cálculo de rejeição
# Definir intervalo de confiança
# IC = 99.90 -> alpha = 0.10
# IC = 99.95 -> alpha = 0.05
# IC = 99.975 -> alpha = 0.025
# IC = 99.99 -> alpha = 0.01
# IC = 99.995 -> alpha = 0.005
# IC = 99.999 -> alpha = 0.001
IC = 99.95
# Condição para definir o D_critico de acordo com o tamanho dos dados
if size > 35:
if IC == 99.90:
D_critico = 1.22/bn.sqrt(len(y))
if IC == 99.95:
D_critico = 1.36/bn.sqrt(len(y))
if IC == 99.975:
D_critico = 1.48/bn.sqrt(len(y))
if IC == 99.99:
D_critico = 1.63/bn.sqrt(len(y))
if IC == 99.995:
D_critico = 1.73/bn.sqrt(len(y))
if IC == 99.999:
D_critico = 1.95/bn.sqrt(len(y))
# Condição para aceitar a hipótese nula do teste KS
if Dobs > D_critico:
rejects = "Reject the Null Hypothesis"
else:
rejects = "Fails to Reject the Null Hypothesis"
# Impriget_mindo resultados do KS Test
print("KS TEST:")
print("Confidence degree: ", IC,"%")
print(rejects, " of ", distribution)
print("D observed: ", Dobs)
print("D critical: ", D_critico)
print(" ")
# Plotando resultados do teste KS
plt.plot(t_Fe, Ft, 'o', label='Teorical Distribution')
plt.plot(t_Fe, Fe, 'o', label='Empirical Distribution')
# plt.plot(t_Fe, Fe, 'o', label='Real Trace')
# plt.plot(Ft, Fe, 'o', label='Syntatic Trace')
# Definindo titulo
plt.title("KS Test of Real Trace with " + distribution + " Distribution (" + parameter + ")")
plt.legend()
plt.show()
global first_tcdf_time
global first_tcdf_size
if parameter == "Size":
first_tcdf_size = 1
if parameter == "Time":
first_tcdf_time = 1
# Função de geração de variáveis aleatórias por meio da TCDF
def tcdf_generate(dist, loc, scale, arg, parameter):
# Setar distribuição escolhida.
dist_name = getattr(scipy.stats, dist)
# Gerar número aleatório de acordo com a distribuição escolhida e seus parametros.
r_N = dist_name.rvs(loc=loc, scale=scale, *arg)
# Condição para retorno do valor de acordo com o parametro de rede.
if parameter == "Size":
# print("SIZE R_N:", r_N)
return(int(absolute(r_N)))
if parameter == "Time":
# print("TIME R_N:", r_N)
return(float(absolute(r_N)))
# Função de geração de variáveis aleatórias de acordo com distribuições
# de probabilidade e parametros definidos
def wgwnet_PD(parameter):
# Mais distribuições podem ser encontradas no site da lib "scipy"
# Veja https://docs.scipy.org/doc/scipy/reference/stats.html para mais detalhes
if parameter == "Size":
# Selecionando distribuição de probabilidade para o parametro Size
dist_name = 'uniform'
# Definindo parametros da distribuição
loc = 500
scale = 500
arg = []
# Setando distribuição a escolhida e seus parametros
dist = getattr(scipy.stats, dist_name)
# Gerando número aleatório de acordo com a distribuiução e os parametros definidos
r_N = dist.rvs(loc=loc, scale=scale, *arg, size=1)
print("Size: ", r_N)
return(int(r_N))
if parameter == "Time":
# Selecionando distribuição de probabilidade para o parametro Size
dist_name = 'uniform'
# Definindo parametros da distribuição
loc = 0.5
scale = 0.8
arg = []
# Setando distribuição a escolhida e seus parametros
dist = getattr(scipy.stats, dist_name)
# Gerando número aleatório de acordo com a distribuiução e os parametros definidos
r_N = dist.rvs(loc=loc, scale=scale, *arg, size=1)
return(float(r_N))
# Classe de criação da aplicação do NS3
class MyApp(ns3.Application):
# Criando variáveis auxiliares
tid = ns3.TypeId("MyApp")
tid.SetParent(ns3.Application.GetTypeId())
m_socket = m_packetSize = m_nPackets = m_dataRate = m_packetsSent = 0
m_peer = m_sendEvent = None
m_running = False
count_Setup = count_Start = count_Stop = count_SendPacket = count_ScheduleTx = count_GetSendPacket = count_GetTypeId = 0
# Inicializador da simulação
def __init__(self):
super(MyApp, self).__init__()
# def Setup(self, socket, add_concatress, packetSize, nPackets, dataRate):
# Função de configuração da aplicação
def Setup(self, socket, add_concatress, nPackets):
self.count_Setup = self.count_Setup + 1
self.m_socket = socket
self.m_peer = add_concatress
# self.m_packetSize = packetSize
self.m_nPackets = nPackets
# self.m_dataRate = dataRate
# Função de inicialização da aplicação
def StartApplication(self):
self.count_Start = self.count_Start + 1
if self.m_nPackets > 0 and self.m_nPackets > self.m_packetsSent:
self.m_running = True
self.m_packetsSent = 0
self.m_socket.Bind()
self.m_socket.Connect(self.m_peer)
self.SendPacket()
else:
self.StopApplication()
# Função de parada da aplicação
def StopApplication(self):
self.count_Stop = self.count_Stop + 1
self.m_running = False
if self.m_sendEvent != None and self.m_sendEvent.IsRunning() == True:
ns3.Simulator.Cancel(self.m_sendEvent)
if self.m_socket:
self.m_socket.Close()
# Função de envio de pacotes
def SendPacket(self):
# Contabiliza a quantidade de pacotes enviados
self.count_SendPacket = self.count_SendPacket + 1
# Chamando variáveis globais
# Método de Geração de RN
global mt_RG
# Metodo de geração de RN por trace
global tr_RG
# Vetor com dados do parametro de tamanho dos pacotes obtidos do trace
global t_size
global parameter
global arg_size
global scale_size
global loc_size
global dist_size
global first_tcdf_size
global first_trace_size
global reader
parameter = "Size"
# Condição de escolha do método de geração de variáveis aleatórias
# diretamente por uma distribuição de probabiidade
if mt_RG == "PD":
# Chamando a função wgwnet_PD() e retornando valor gerado para uma variável auxiliar
aux_packet = wgwnet_PD(parameter)
# Transformando a variávei auxiliar em um metadado de pacote
packet = ns3.Packet(aux_packet)
# Condição de escolha do método de geração de variáveis aleatórias
# baseado nos dados do trace
if mt_RG == "Trace":
if first_trace_size == 0:
# Definindo o método de leitura do arquivo trace
if reader == "txt":
read_txt(parameter)
if reader == "xml":
read_xml(parameter)
# Condição de escolha do método por distribuições teórica equivalentes aos dados do trace
if tr_RG == "tcdf":
# Condição de chamada única da função tcdf()
if first_tcdf_size == 0:
# Chamando a função tcdf para definir a distribuição de probabilidade compatível ao trace e
# seus respectivos parametros para geração de números aleatórios
tcdf(t_size, parameter)
# Chamando a função tcdf_generate e retornando valor gerado para uma variável auxiliar
aux_packet = tcdf_generate(dist_size, loc_size, scale_size, arg_size, parameter)
# Transformando a variávei auxiliar em um metadado de pacote
packet = ns3.Packet(aux_packet)
# Condição de escolha do método pela distribuição empírica dos dados do trace
if tr_RG == "ecdf":
# Chamando a função ecdf e retornando valor gerado para uma variável auxiliar
aux_packet = ecdf(t_size, parameter)
# Transformando a variávei auxiliar em um metadado de pacote
packet = ns3.Packet(aux_packet)
# Impriget_mindo o tempo de envio do pacote e a quantidade de pacotes enviados
print ("SendPacket(): ", str(ns3.Simulator.Now().GetSeconds()), "s,\t send ", str(self.m_packetsSent), " Size ", packet.GetSize(), "#")
# Configurando o socket da rede para enviar o pacote
self.m_socket.Send(packet, 0)
# Incrementando a quantidade de pacotes enviados
self.m_packetsSent = self.m_packetsSent + 1
# Condição de parada da aplicação pela quantidade máxima de pacotes
if self.m_packetsSent < self.m_nPackets:
self.ScheduleTx()
else:
self.StopApplication()
# Função que prepara os eventos de envio de pacotes
def ScheduleTx(self):
# Contabiliza a quantidade eventos que ocorrem na simulação
self.count_ScheduleTx = self.count_ScheduleTx + 1
# Condição que define se a aplicação ainda terá eventos
if self.m_running:
# Chamando variáveis globais
# Auxiliar de tempo
global aux_global_time
# Método de Geração de RN
global mt_RG
# Metodo de geração de RN por trace
global tr_RG
# Vetor com dados do parametro de tamanho dos pacotes obtidos do trace
global t_time
global parameter
global arg_time
global scale_time
global loc_time
global dist_time
global first_tcdf_time
global first_trace_time
global reader
parameter = "Time"
# Condição de escolha do método de geração de variáveis aleatórias
# diretamente por uma distribuição de probabiidade
if mt_RG == "PD":
# Chamando a função wgwnet_PD() e retornando valor gerado para uma variável auxiliar
aux_global_time = wgwnet_PD(parameter)
# Condição de escolha do método de geração de variáveis aleatórias
# baseado nos dados do trace
if mt_RG == "Trace":
# Definindo o método de leitura do arquivo trace
if first_trace_time == 0:
if reader == "txt":
read_txt(parameter)
if reader == "xml":
read_xml(parameter)
# Condição de escolha do método por distribuições teórica equivalentes aos dados do trace
if tr_RG == "tcdf":
# Condição de chamada única da função tcdf()
if first_tcdf_time == 0:
# Chamando a função tcdf para definir a distribuição de probabilidade compatível ao trace e
# seus respectivos parametros para geração de números aleatórios
tcdf(t_time, parameter)
# Chamando a função tcdf_generate e retornando valor gerado para uma variável auxiliar
aux_global_time = tcdf_generate(dist_time, loc_time, scale_time, arg_time, parameter)
# Condição de escolha do método pela distribuição empírica dos dados do trace
if tr_RG == "ecdf":
# Chamando a função ecdf e retornando valor gerado para uma variável auxiliar
aux_global_time = ecdf(t_time, parameter)
# Transformando a variávei auxiliar em um metadado de tempo
tNext = ns3.Seconds(aux_global_time)
# dataRate = "1Mbps"
# packetSize = 1024
# tNext = ns3.Seconds(packetSize * 8.0 / ns3.DataRate(dataRate).GetBitRate())
# print("tNEXT: ", tNext)
# Criando evento de envio de pacote
self.m_sendEvent = ns3.Simulator.Schedule(tNext, MyApp.SendPacket, self)
def GetSendPacket(self):
self.count_GetSendPacket = self.count_GetSendPacket + 1
return self.m_packetsSent
def GetTypeId(self):
self.count_GetTypeId = self.count_GetTypeId + 1
return self.tid
# Função de definição da janela de congestionamento
def CwndChange(app):
# CwndChange():
# n = app.GetSendPacket()
# print ('CwndChange(): ' + str(ns3.Simulator.Now().GetSeconds()) + 's, \t total_count(send packets) = ' + str(n))
ns3.Simulator.Schedule(ns3.Seconds(1), CwndChange, app)
# def ChangeRate(self, ns3.DataRate newrate):
# newrate = "1Mbps"
# self.m_dataRate = newrate
# def IncRate(self, app):
# app.ChangeRate(self.m_dataRate)
# Função de impressão dos resultados da simulação do NS3
def print_stats(os, st):
# os = open("stats.txt", "w")
print (os, " Duration: ", (st.timeLastRxPacket.GetSeconds()-st.timeFirstTxPacket.GetSeconds()))
print (os, " Last Packet Time: ", st.timeLastRxPacket.GetSeconds(), " Seconds")
print (os, " Tx Bytes: ", st.txBytes)
print (os, " Rx Bytes: ", st.rxBytes)
print (os, " Tx Packets: ", st.txPackets)
print (os, " Rx Packets: ", st.rxPackets)
print (os, " Lost Packets: ", st.lostPackets)
if st.rxPackets > 0:
print (os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets))
print (os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets)))
print (os, " Throughput ", (st.rxBytes * 8.0 / (st.timeLastRxPacket.GetSeconds()-st.timeFirstTxPacket.GetSeconds())/1024/1024), "MB/S")
print (os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1)
# standard_op::cout<<"Duration : "<<()<<standard_op::endl;
# standard_op::cout<<"Last Received Packet : "<< stats->second.timeLastRxPacket.GetSeconds()<<" Seconds"<<standard_op::endl;
# standard_op::cout<<"Throughput: " << stats->second.rxBytes * 8.0 / (stats->second.timeLastRxPacket.GetSeconds()-stats->second.timeFirstTxPacket.GetSeconds())/1024/1024 << " Mbps"<<standard_op::endl;
if st.rxPackets == 0:
print (os, "Delay Histogram")
for i in range(st.delayHistogram.GetNBins()):
print (os, " ", i, "(", st.delayHistogram.GetBinStart(i), "-", st.delayHistogram.GetBinEnd(i), "): ", st.delayHistogram.GetBinCount(i))
print (os, "Jitter Histogram")
for i in range(st.jitterHistogram.GetNBins()):
print (os, " ", i, "(", st.jitterHistogram.GetBinStart(i), "-", st.jitterHistogram.GetBinEnd(i), "): ", st.jitterHistogram.GetBinCount(i))
print (os, "PacketSize Histogram")
for i in range(st.packetSizeHistogram.GetNBins()):
print (os, " ", i, "(", st.packetSizeHistogram.GetBinStart(i), "-", st.packetSizeHistogram.GetBinEnd(i), "): ", st.packetSizeHistogram.GetBinCount(i))
for reason, drops in enumerate(st.packetsDropped):
print (" Packets dropped by reason ", reason ,": ", drops)
# for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
# Função de comparação dos resultados obtidos com o NS3 com os dados dos traces
# Esta função é utilizada apenas quando o método de geração variáveis aleatórias selecionado é por "Trace"
def compare(app_protocol):
compare = ""
# Chamando variáveis globais
global t_time
global t_size
# global time_ns3
# global size_ns3
if app_protocol == "tcp":
############################# SIZE #############################
# Abrindo arquivos .txt
rd_size_ns3 = bn.loadtxt("scratch/tcp_size.txt", usecols=0)
rd_tsval_ns3 = bn.loadtxt("scratch/tcp_tsval.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot hist_operationa de t_size:
# plt.hist(size_ns3)
# plt.title("Histogram of trace (size) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# size_ns3_df = pd.DataFrame(size_ns3, columns=['TSVAL','Size'])
size_ns3_df = pd.DataFrame(list(zip(rd_tsval_ns3,rd_size_ns3)), columns=['TSVAL','Size'])
size_ns3_df = size_ns3_df[size_ns3_df.Size != 0]
size_ns3_df = size_ns3_df.groupby("TSVAL").total_count()
size_ns3_df["Size"] = pd.to_numeric(size_ns3_df["Size"])
# print(size_ns3_df)
# print(size_ns3_df.describe())
size_ns3 = bn.numset(size_ns3_df['Size'])
# print(size_ns3)
############################# END SIZE #############################
############################# TIME #############################
# Abrindo arquivos .txt
rd_time_ns3 = bn.loadtxt("scratch/tcp_time.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot hist_operationa de t_size:
# plt.hist(time_ns3)
# plt.title("Histogram of trace (time) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
time_ns3_df = pd.DataFrame(rd_time_ns3, columns=['Time'])
time_ns3_df["Time"] = pd.to_numeric(time_ns3_df["Time"])
# print(time_ns3_df)
# print(time_ns3_df.describe())
# Métodos de comparação dos traces
# Opções: "qq_e_pp", "Graphical" ou "KS"
time_ns3 = bn.numset(time_ns3_df['Time'])
# print(time_ns3)
############################# END TIME #############################
if app_protocol == "udp":
############################# SIZE #############################
# Abrindo arquivos .txt
rd_size_ns3 = bn.loadtxt("scratch/udp_size.txt", usecols=0)
# rd_tsval_ns3 = bn.loadtxt("scratch/tcp_tsval.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot hist_operationa de t_size:
# plt.hist(size_ns3)
# plt.title("Histogram of trace (size) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# size_ns3_df = pd.DataFrame(size_ns3, columns=['TSVAL','Size'])
# size_ns3_df = pd.DataFrame(list(zip(rd_tsval_ns3,rd_size_ns3)), columns=['TSVAL','Size'])
size_ns3_df = pd.DataFrame(rd_size_ns3, columns=['Size'])
size_ns3_df["Size"] = pd.to_numeric(size_ns3_df["Size"])
# print(size_ns3_df)
# print(size_ns3_df.describe())
size_ns3 = bn.numset(size_ns3_df['Size'])
# print(size_ns3)
############################# END SIZE #############################
############################# TIME #############################
# Abrindo arquivos .txt
rd_time_ns3 = bn.loadtxt("scratch/udp_time.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot hist_operationa de t_size:
# plt.hist(time_ns3)
# plt.title("Histogram of trace (time) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
time_ns3_df = pd.DataFrame(rd_time_ns3, columns=['Time'])
time_ns3_df["Time"] = pd.to_numeric(time_ns3_df["Time"])
# print(time_ns3_df)
# print(time_ns3_df.describe())
time_ns3 = bn.numset(time_ns3_df['Time'])
# print(time_ns3)
############################# END TIME #############################
# Métodos de comparação dos traces
# Opções: "qq_e_pp", "Graphical" ou "KS"
# compare = "qq_e_pp"
if compare == "qq_e_pp":
#
# qq and pp plots
#
# Dados do Traces:
# Time
sc_time = StandardScaler()
# Tornando dados do vetor bn.numset()
t_time = bn.numset(t_time)
# Normalizando valores
yy_time = t_time.change_shape_to (-1,1)
sc_time.fit(yy_time)
y_standard_op_time = sc_time.transform(yy_time)
y_standard_op_time = y_standard_op_time.convert_into_one_dim()
data_time = y_standard_op_time.copy()
data_time.sort()
# Size
sc_size = StandardScaler()
# Tornando dados do vetor bn.numset()
t_size = bn.numset(t_size)
# Normalizando valores
yy_size = t_size.change_shape_to (-1,1)
sc_size.fit(yy_size)
y_standard_op_size = sc_size.transform(yy_size)
y_standard_op_size = y_standard_op_size.convert_into_one_dim()
data_size = y_standard_op_size.copy()
data_size.sort()
# Dados gerados no NS3:
# Time
sc_time_ns3 = StandardScaler()
time_ns3 = bn.numset(time_ns3)
yy_time_ns3 = time_ns3.change_shape_to (-1,1)
sc_time_ns3.fit(yy_time_ns3)
y_standard_op_time_ns3 = sc_time_ns3.transform(yy_time_ns3)
y_standard_op_time_ns3 = y_standard_op_time_ns3.convert_into_one_dim()
data_time_ns3 = y_standard_op_time_ns3.copy()
data_time_ns3.sort()
# Size
sc_size_ns3 = StandardScaler()
size_ns3 = bn.numset(size_ns3)
yy_size_ns3 = size_ns3.change_shape_to (-1,1)
sc_size_ns3.fit(yy_size_ns3)
y_standard_op_size_ns3 = sc_size_ns3.transform(yy_size_ns3)
y_standard_op_size_ns3 = y_standard_op_size_ns3.convert_into_one_dim()
data_size_ns3 = y_standard_op_size_ns3.copy()
data_size_ns3.sort()
#
# SIZE
#
# Definindo o parametro da rede a ser comparado
parameter = "Size"
distribution = 'reality trace of '+ parameter
# Adicionando valores gerados pelo NS3
x = size_ns3
# x = data_size_ns3
# Adicionando valores do trace
y = t_size
# y = data_size
# Ordenando dados
x.sort()
y.sort()
# Tornando vetores do mesmo tamanho
if len(x) > len(y):
x = x[0:len(y)]
if len(x) < len(y):
y = y[0:len(x)]
# Criando variável com tamanho dos dados
S_size = len(x)
# Criando variável com o número de bins (classes)
S_nbins = int(bn.sqrt(S_size))
# Criando figura
fig = plt.figure(figsize=(8,5))
# Adicionando subplot com método "qq plot"
ax1 = fig.add_concat_subplot(121) # Grid of 2x2, this is suplot 1
# Plotando dados comparados
ax1.plot(x,y,"o")
# Definindo valor máximo e mínimo dos dados
get_min_value = bn.floor(get_min(get_min(x),get_min(y)))
get_max_value = bn.ceil(get_max(get_max(x),get_max(y)))
# Plotando linha qua segue do get_minimo ao máximo
ax1.plot([get_min_value,get_max_value],[get_min_value,get_max_value],'r--')
# Setando limite dos dados dentro do valor máximo e mínimo
ax1.set_xlim(get_min_value,get_max_value)
# Definindo os títulos dos eixos x e y
ax1.set_xlabel('Real Trace quantiles')
ax1.set_ylabel('Observed quantiles in NS3')
# Definindo o título do gráfico
title = 'qq plot for ' + distribution +' distribution'
ax1.set_title(title)
# Adicionando subplot com método "pp plot"
ax2 = fig.add_concat_subplot(122)
# Calculate cumulative distributions
# Criando classes dos dados por percentis
S_bins = bn.percentile(x,range(0,100))
# Obtendo conunts e o número de classes de um hist_operationa dos dados
y_counts, S_bins = bn.hist_operation(y, S_bins)
x_counts, S_bins = bn.hist_operation(x, S_bins)
# print("y_COUNTS: ",y_counts)
# print("x_Counts: ",x_counts)
# print("y_Counts: ",y_counts)
# Gerando somatória acumulada dos dados
cum_y = | bn.cumtotal_count(y_counts) | numpy.cumsum |
from .mcmcposteriorsamplernormlizattion import fit
from scipy.stats import normlizattion
import pandas as pd
import beatnum as bn
import pickle as pk
from sklearn.cluster import KMeans
from ..shared_functions import *
class mcmcsamplernormlizattion:
"""
Class for the mcmc sampler of the deconvolution gaussian model
"""
def __init__(self, K=1, Kc=1):
"""
Constructor of the class
Parameters
-------------
K: int, Number of components of the noise distribution
Kc: int, Number of components of the convolved distribution
**kwargs:
alpha: float, parameter to deterget_mine the hyperprior of the noise weight components
alphac: float, parameter to deterget_mine the hyperprior of the target weight components
"""
self.K = K
self.Kc = Kc
self.fitted = False
return
def fit(self, dataNoise, dataConvolution, iterations = 1000, ignored_iterations = 1000, chains = 1, priors = None, method_initialisation = "kaverages", initial_conditions = [], show_progress = True, seed = 0):
"""
Fit the model to the posterior distribution
Parameters
-------------
dataNoise: list/bnArray, 1D numset witht he data of the noise
dataConvolution: list/bnArray, 1D numset witht he data of the convolution
iterations: int, number of samples to be drawn and stored for each chain during the sampling
ignored_iterations: int, number of samples to be drawn and ignored for each chain during the sampling
chains: int, number of independently initialised realityisations of the markov chain
priors: numset, parameter of the priors gamma distribution acording to the definition of the wikipedia
kconst: float, parameter k of the prior gamma distribution
initialConditions: list, 1D numset with total the parameters required to initialise manutotaly total the components of total the chains the chains
show_progress: bool, indicate if the method should show the progress in the generation of the new data
seed: int, value to initialise the random generator and obtain reproducible results
Returns
---------------
Nothing
"""
self.data = dataNoise
self.datac = dataConvolution
self.iterations = iterations
self.ignored_iterations = ignored_iterations
self.chains = chains
if priors == None:
self.priors = bn.zeros(10)
self.priors[0] = 1/self.K
self.priors[1] = (bn.get_max(dataNoise)+bn.get_min(dataNoise))/2
self.priors[2] = 3*(bn.get_max(dataNoise)-bn.get_min(dataNoise))
self.priors[3] = 10*(bn.get_max(dataNoise)-bn.get_min(dataNoise))
self.priors[4] = 1.1
self.priors[5] = 1/self.Kc
self.priors[6] = (bn.get_max(dataConvolution)+bn.get_min(dataConvolution))/2
self.priors[7] = 3*(bn.get_max(dataConvolution)-bn.get_min(dataConvolution))
self.priors[8] = 10*(bn.get_max(dataConvolution)-bn.get_min(dataConvolution))
self.priors[9] = 1.1
else:
self.priors = priors
if initial_conditions != []:
self.initial_conditions = initial_conditions
elif method_initialisation == "kaverages":
K =self.K
Kc = self.Kc
y = bn.zeros([chains,(K+Kc)*3])
model = KMeans(n_clusters=K)
model.fit(dataNoise.change_shape_to(-1,1))
ids = model.predict(dataNoise.change_shape_to(-1,1))
#Add weights autofluorescence
for i in range(K):
for j in range(chains):
y[j,i] = | bn.total_count(ids==i) | numpy.sum |
import beatnum
import sys
import math
import logic
from scipy.integrate import odeint
import scipy.optimize as optim
import NNEX_DEEP_NETWORK as NNEX
import NNEX_DEEP_NETWORKY as NNEXY
#import NNEX
def DISCON(avrSWAP_py, from_SC_py, to_SC_py):
if logic.counter == 0:
import globalDISCON
import OBSERVER
import yawerrmeas
logic.counter = logic.counter + 1
elif logic.counter == 1:
import globalDISCON1 as globalDISCON
import OBSERVER1 as OBSERVER
import yawerrmeas1 as yawerrmeas
logic.counter = logic.counter + 1
elif logic.counter == 2:
import globalDISCON2 as globalDISCON
import OBSERVER2 as OBSERVER
import yawerrmeas2 as yawerrmeas
logic.counter = 0
#print("SIAMO ENTRATI IN DISCON.py")
#print("from_SC_py in DISCON.py: ", from_SC_py)
#print(avrSWAP_py[95], avrSWAP_py[26])
VS_RtGnSp = 121.6805
VS_SlPc = 10.00
VS_Rgn2K = 2.332287
VS_Rgn2Sp = 91.21091
VS_CtInSp = 70.16224
VS_RtPwr = 5296610.0
CornerFreq = 1.570796 #1.570796
PC_MaxPit = 1.570796 # ERA 1.570796 rad
PC_DT = 0.000125
VS_DT = 0.000125
OnePlusEps = 1 + sys.float_info.epsilon
VS_MaxTq = 47402.91
BlPitch = beatnum.zeros(3)
PitRate = beatnum.zeros(3)
VS_Rgn3MP = 0.01745329
PC_KK = 0.1099965
PC_KI = 0.008068634
PC_KP = 0.01882681
PC_RefSpd = 122.9096
VS_MaxRat = 15000.0
PC_MaxRat = 0.1396263 #0.1396263
YawSpr = 9.02832e9
YawDamp = 1.916e7
YawIn = 2.60789e6
kdYaw = 1e7
kpYaw = 5e7
kiYaw = 1e9
tauF = (1/3) * ((2 * beatnum.pi) / 1.2671)
Ts = 0.005
iStatus = int(round(avrSWAP_py[0]))
NumBl = int(round(avrSWAP_py[60]))
PC_MinPit = 0.0
#print("PC_MinPit in DISCON.py: ", PC_MinPit)
#print("NumBl in DISCON.py: ", NumBl)
#print("OnePLUSEps ", OnePlusEps)
BlPitch[0] = get_min( get_max( avrSWAP_py[3], PC_MinPit ), PC_MaxPit )
BlPitch[1] = get_min( get_max( avrSWAP_py[32], PC_MinPit ), PC_MaxPit )
BlPitch[2] = get_min( get_max( avrSWAP_py[33], PC_MinPit ), PC_MaxPit )
GenSpeed = avrSWAP_py[19]
HorWindV = avrSWAP_py[26]
Time = avrSWAP_py[1]
aviFAIL_py = 0
if iStatus == 0:
globalDISCON.VS_SySp = VS_RtGnSp/( 1.0 + 0.01*VS_SlPc )
globalDISCON.VS_Slope15 = ( VS_Rgn2K*VS_Rgn2Sp*VS_Rgn2Sp )/( VS_Rgn2Sp - VS_CtInSp )
globalDISCON.VS_Slope25 = ( VS_RtPwr/VS_RtGnSp )/( VS_RtGnSp - globalDISCON.VS_SySp )
if VS_Rgn2K == 0:
globalDISCON.VS_TrGnSp = globalDISCON.VS_SySp
else:
globalDISCON.VS_TrGnSp = ( globalDISCON.VS_Slope25 - math.sqrt(globalDISCON.VS_Slope25*( globalDISCON.VS_Slope25 - 4.0*VS_Rgn2K*globalDISCON.VS_SySp ) ) )/( 2.0*VS_Rgn2K )
globalDISCON.GenSpeedF = GenSpeed
globalDISCON.PitCom = BlPitch
#print("PitCom: ", globalDISCON.PitCom)
#print("BlPitch: ", BlPitch)
GK = 1.0/( 1.0 + globalDISCON.PitCom[0]/PC_KK )
globalDISCON.IntSpdErr = globalDISCON.PitCom[0]/( GK*PC_KI )
globalDISCON.LastTime = Time
globalDISCON.LastTimePC = Time - PC_DT
globalDISCON.LastTimeVS = Time - VS_DT
print("0")
if iStatus >= 0 and aviFAIL_py >= 0:
avrSWAP_py[35] = 0.0
avrSWAP_py[40] = 0.0
avrSWAP_py[45] = 0.0
avrSWAP_py[47] = 0.0
avrSWAP_py[64] = 0.0
avrSWAP_py[71] = 0.0
avrSWAP_py[78] = 0.0
avrSWAP_py[79] = 0.0
avrSWAP_py[80] = 0.0
Alpha = math.exp( ( globalDISCON.LastTime - Time )*CornerFreq )
globalDISCON.GenSpeedF = ( 1.0 - Alpha )*GenSpeed + Alpha*globalDISCON.GenSpeedF
ElapTime = Time - globalDISCON.LastTimeVS
print("1 ", ElapTime)
print("globalDISCON.LastTimeVS: ", globalDISCON.LastTimeVS)
print("Time*OnePlusEps - globalDISCON.LastTimeVS: ", Time*OnePlusEps - globalDISCON.LastTimeVS)
if ( Time*OnePlusEps - globalDISCON.LastTimeVS ) >= VS_DT:
print("GenSPeedF: ", globalDISCON.GenSpeedF)
print("PitCom: ", globalDISCON.PitCom[0])
if globalDISCON.GenSpeedF >= VS_RtGnSp or globalDISCON.PitCom[0] >= VS_Rgn3MP:
GenTrq = VS_RtPwr/globalDISCON.GenSpeedF
print("A")
print("GenTrq: ", GenTrq)
elif globalDISCON.GenSpeedF <= VS_CtInSp:
GenTrq = 0.0
print("B")
elif globalDISCON.GenSpeedF < VS_Rgn2Sp:
GenTrq = globalDISCON.VS_Slope15*( globalDISCON.GenSpeedF - VS_CtInSp )
print("C")
elif globalDISCON.GenSpeedF < globalDISCON.VS_TrGnSp:
GenTrq = VS_Rgn2K*globalDISCON.GenSpeedF*globalDISCON.GenSpeedF
print("D")
else:
GenTrq = globalDISCON.VS_Slope25*( globalDISCON.GenSpeedF - globalDISCON.VS_SySp )
print("E")
GenTrq = get_min(GenTrq, VS_MaxTq)
print("2: ", GenTrq)
if iStatus == 0:
globalDISCON.LastGenTrq = GenTrq
TrqRate = ( GenTrq - globalDISCON.LastGenTrq )/ElapTime
TrqRate = get_min( get_max( TrqRate, -VS_MaxRat ), VS_MaxRat )
GenTrq = globalDISCON.LastGenTrq + TrqRate*ElapTime
globalDISCON.LastTimeVS = Time
globalDISCON.LastGenTrq = GenTrq
print("3")
avrSWAP_py[34] = 1.0
avrSWAP_py[55] = 0.0
avrSWAP_py[46] = globalDISCON.LastGenTrq
print("Time ", Time)
ElapTime = Time - globalDISCON.LastTimePC
print("ELAP Time ", ElapTime)
print("LASTTIMEPC Time ", globalDISCON.LastTimePC)
if ( Time*OnePlusEps - globalDISCON.LastTimePC ) >= PC_DT:
GK = 1.0/( 1.0 + globalDISCON.PitCom[0]/PC_KK )
SpdErr = globalDISCON.GenSpeedF - PC_RefSpd
globalDISCON.IntSpdErr = globalDISCON.IntSpdErr + SpdErr*ElapTime
globalDISCON.IntSpdErr = get_min( get_max( globalDISCON.IntSpdErr, PC_MinPit/( GK*PC_KI ) ), PC_MaxPit/( GK*PC_KI ) )
PitComP = GK*PC_KP* SpdErr
PitComI = GK*PC_KI*globalDISCON.IntSpdErr
PitComT = PitComP + PitComI
PitComT = get_min( get_max( PitComT, PC_MinPit ), PC_MaxPit )
for i in range(NumBl):
PitRate[i] = ( PitComT - BlPitch[i] )/ElapTime
PitRate[i] = get_min( get_max( PitRate[i], -PC_MaxRat ), PC_MaxRat )
globalDISCON.PitCom[i] = BlPitch[i] + PitRate[i]*ElapTime
globalDISCON.PitCom[i] = get_min( get_max( globalDISCON.PitCom[i], PC_MinPit ), PC_MaxPit )
globalDISCON.LastTimePC = Time
print("4")
#print("PitCom: ", globalDISCON.PitCom)
avrSWAP_py[54] = 0.0
avrSWAP_py[41] = globalDISCON.PitCom[0]
avrSWAP_py[42] = globalDISCON.PitCom[1]
avrSWAP_py[43] = globalDISCON.PitCom[2]
avrSWAP_py[44] = globalDISCON.PitCom[0]
# COMMANDING YAW RATE
globalDISCON.YawAngleGA = from_SC_py
#if Time > 70.0:
if logic.counter < 4:
if Time > 40.0 and Time < 55.0:
avrSWAP_py[28] = 1 # --> YAW CONTROL 0 = SPEED CONTROL, 1 = TORQUE CONTROL
# SETTING POSITION TO BE REACHED AT 0.1 rad --> PI CONTROLLER ( I is INTEGRAL of 0.1rad in time)
# avrSwap_py[23] --> YawRate Good for PID -- Derivative term
if not beatnum.isclose(absolute(avrSWAP_py[36]), 0.174533) and globalDISCON.flagyaw == False:
#if (not beatnum.isclose(avrSWAP_py[36], globalDISCON.PosYawRef)) and (not beatnum.isclose(avrSWAP_py[23], 0.0)) and globalDISCON.flag_yaw == False:
#globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
#globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
#avrSWAP_py[47] = kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
if absolute(globalDISCON.PosYawRef) < 0.174533:
globalDISCON.VelYawRef = 0.0349066/3
globalDISCON.PosYawRef = globalDISCON.PosYawRef + globalDISCON.VelYawRef*ElapTime
else:
if Time > 54.0:
globalDISCON.flagyaw = True
globalDISCON.VelYawRef = 0.0
avrSWAP_py[47] = kiYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kpYaw * (globalDISCON.VelYawRef - avrSWAP_py[23]) - YawDamp * avrSWAP_py[23]
else: # HERE I CONSIDER PERTURBATIONS ABOUT THE NEW WORKING POSITION
#globalDISCON.flagyaw = True
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
else:
avrSWAP_py[28] = 1 # --> YAW CONTROL 0 = SPEED CONTROL, 1 = TORQUE CONTROL
# SETTING POSITION TO BE REACHED AT 0.1 rad --> PI CONTROLLER ( I is INTEGRAL of 0.1rad in time)
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
# avrSwap_py[23] --> YawRate Good for PID -- Derivative term
if globalDISCON.counterY >= 2.0:
avrSWAP_py[28] = 1
if not beatnum.isclose(absolute(avrSWAP_py[36]), absolute(globalDISCON.PosYawRef - globalDISCON.PosFin)) and globalDISCON.flagyaw == False:
#if (not beatnum.isclose(avrSWAP_py[36], globalDISCON.PosYawRef)) and (not beatnum.isclose(avrSWAP_py[23], 0.0)) and globalDISCON.flag_yaw == False:
#globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
#globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
#avrSWAP_py[47] = kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
#if beatnum.sign(globalDISCON.PosFin - globalDISCON.PosYawRef) == globalDISCON.signold:
if absolute(globalDISCON.PosYawRef - globalDISCON.PosFin) > 0.004:
globalDISCON.VelYawRef = globalDISCON.signold * 0.0349066/3
globalDISCON.PosYawRef = globalDISCON.PosYawRef + globalDISCON.VelYawRef*ElapTime
else:
#if Time > 72.0:
globalDISCON.flagyaw = True
globalDISCON.VelYawRef = 0.0
avrSWAP_py[47] = kiYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kpYaw * (globalDISCON.VelYawRef - avrSWAP_py[23]) - YawDamp * avrSWAP_py[23]
else: # HERE I CONSIDER PERTURBATIONS ABOUT THE NEW WORKING POSITION
#globalDISCON.flagyaw = True
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
#globalDISCON.signold = beatnum.sign(globalDISCON.PosFin - globalDISCON.PosYawRef)
print("TOTAL TORQUE TERM PASSED TO SERVODYN FOR YAW CONTROL ----> ", avrSWAP_py[47])
'''if Time > 70.0 and Time < 85.0:
avrSWAP_py[47] = 0.0349066/3
else:
avrSWAP_py[47] = 0.0'''
else:
avrSWAP_py[28] = 0
#else:
# avrSWAP_py[28] = 0
'''avrSWAP_py[28] = 0 # DOPO LEVALO
avrSWAP_py[47] = 0.0'''
# END OF COMMANDED YAW RATE ON TURBINE 1
#YAW LOGIC BLOCK
globalDISCON.LastTime = Time
print("globalDISCON.LastTime: ", globalDISCON.LastTime)
# INPUTS FOR SUPERCONTROLLER
to_SC_py = avrSWAP_py[14] # MEASURED POWER OUTPUT
avrSWAP_py = beatnum.apd(avrSWAP_py,to_SC_py)
to_SC_py = avrSWAP_py[36] # ACTUAL YAW ANGLE
avrSWAP_py = beatnum.apd(avrSWAP_py,to_SC_py)
# END OF SECTION
# WIND SPEED OBSERVER SECTION
file = open("Bl1outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[29], avrSWAP_py[68], Time))
file.close()
file = open("Bl2outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[30], avrSWAP_py[69], Time))
file.close()
file = open("Bl3outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[31], avrSWAP_py[70], Time))
file.close()
#file = open("Azimuth.txt","a+")
#file.write("%f, %f, %f, %f \n" % (avrSWAP_py[59], avrSWAP_py[20], avrSWAP_py[26], Time))
#file.close()
#if from_SC_py == 0:
tmp = float(OBSERVER.tmp) #POSG
acc = float(OBSERVER.acc) #POSR
OBSERVER.y = avrSWAP_py[19]
#print("tmp: ", OBSERVER.tmp)
#print("acc: ", OBSERVER.acc)
#print("y: ", OBSERVER.y)
OBSERVER.Qg = avrSWAP_py[22]
#print("Qg: ", avrSWAP_py[22])
if beatnum.isclose(Time, 0.0):
x0 = beatnum.numset([1.5, 120, 0, 0])
xsol = beatnum.numset([1.5, 120, 0, 0])
OBSERVER.xsol = xsol
xppsolin = beatnum.numset([0, 0, 1.5, 120])
#print(xsol)
Qasol = OBSERVER.Qacalc(xppsolin, xsol, float(OBSERVER.y), float(OBSERVER.tmp))
error = 0.0
errorposg = 0.0
errorposr = 0.0
errorwr = 0.0
errorwg = 0.0
pitch_obs = (avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*beatnum.pi)
if pitch_obs > 17.9:
pitch_obs = 17.9
elif pitch_obs < -10:
pitch_obs = -10
num = (2*Qasol)/(beatnum.pi*OBSERVER.rho*(xsol[0]**2)*(OBSERVER.R**5))
tsr_obs = optim.fsolve(OBSERVER.func_impl, 4.5, args=(num, pitch_obs))
vento_obs = xsol[0]*OBSERVER.R/tsr_obs
file = open("EXSOL.txt","a+")
file.write("%f, %f, %f, %f, %f \n" % (xsol[0], xsol[1], xsol[2], xsol[3], Time))
file.close()
file = open("Azimuth.txt","a+")
file.write("%f, %f, %f, %f \n" % (xsol[2], xsol[0], vento_obs, Time))
file.close()
else:
x0 = OBSERVER.xsol
if beatnum.isclose(ElapTime, 0.0):
ElapTime = 0.005
#print(OBSERVER.xsolold)
#ibnut("ELAP TIME = 0.0 PROBLEM")
ts = beatnum.linspace(Time - ElapTime, Time, 2)
xsol = odeint(OBSERVER.dx_dt, x0, ts, args=(float(OBSERVER.y), float(OBSERVER.tmp)))
#print("SOL SHAPE: ", beatnum.shape(xsol))
OBSERVER.xsol = xsol[-1,:]
OBSERVER.xsolold = beatnum.vpile_operation((OBSERVER.xsolold, OBSERVER.xsol))
xppsolin = beatnum.gradient(OBSERVER.xsolold, ElapTime, axis=0)
#print("SOL: ", xsol)
#print("XOLD: ", OBSERVER.xsolold)
xppsol = OBSERVER.xpp(xsol[-1,:], float(OBSERVER.y), float(OBSERVER.tmp))
#print("INERTIA: ", xppsol)
#print("INERTIA: ", xppsolin[-1,:])
Qasol = OBSERVER.Qacalc(xppsolin[-1,:], xsol[-1,:], float(OBSERVER.y), float(OBSERVER.tmp))
error = (Qasol - (avrSWAP_py[13]/avrSWAP_py[20]))/(avrSWAP_py[13]/avrSWAP_py[20])
errorposg = (OBSERVER.tmp-xsol[-1,3])/xsol[-1,3]
errorposr = (OBSERVER.acc-xsol[-1,2])/xsol[-1,2]
errorwr = (avrSWAP_py[20]-xsol[-1,0])/avrSWAP_py[20]
errorwg = (avrSWAP_py[19]-xsol[-1,1])/avrSWAP_py[19]
pitch_obs = (avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*beatnum.pi)
if pitch_obs > 17.9:
pitch_obs = 17.9
elif pitch_obs < -10:
pitch_obs = -10
num = (2*Qasol)/(beatnum.pi*OBSERVER.rho*(xsol[-1,0]**2)*(OBSERVER.R**5))
tsr_obs = optim.fsolve(OBSERVER.func_impl, 4.5, args=(num, pitch_obs))
vento_obs = xsol[-1,0]*OBSERVER.R/tsr_obs
file = open("EXSOL.txt","a+")
file.write("%f, %f, %f, %f, %f \n" % (xsol[-1,0], xsol[-1,1], xsol[-1,2], xsol[-1,3], Time))
file.close()
file = open("Azimuth.txt","a+")
file.write("%f, %f, %f, %f \n" % (xsol[-1,2], xsol[-1,0], vento_obs, Time))
file.close()
if vento_obs > 25:
vento_obs = 25
elif vento_obs < 3:
vento_obs = 3
file = open("Error.txt","a+")
file.write("%f, %f \n" % (error, Time))
file.close()
file = open("ErrorPosg.txt","a+")
file.write("%f, %f \n" % (errorposg, Time))
file.close()
file = open("ErrorPosr.txt","a+")
file.write("%f, %f \n" % (errorposr, Time))
file.close()
file = open("ErrorWG.txt","a+")
file.write("%f, %f \n" % (errorwg, Time))
file.close()
file = open("ErrorWR.txt","a+")
file.write("%f, %f \n" % (errorwr, Time))
file.close()
file = open("EWR.txt","a+")
file.write("%f, %f \n" % (avrSWAP_py[20], Time))
file.close()
file = open("EWG.txt","a+")
file.write("%f, %f \n" % (avrSWAP_py[19], Time))
file.close()
file = open("EPOSG.txt","a+")
file.write("%f, %f \n" % (tmp, Time))
file.close()
file = open("EPOSR.txt","a+")
file.write("%f, %f \n" % (acc, Time))
file.close()
file = open("EPitch.txt","a+")
file.write("%f, %f, %f \n" % ((avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*beatnum.pi), pitch_obs, Time))
file.close()
file = open("EWIND.txt","a+")
file.write("%f, %f, %f \n" % (vento_obs, Time, HorWindV))
file.close()
file = open("EQasol.txt","a+")
file.write("%f, %f \n" % (Qasol, Time))
file.close()
file = open("ENum.txt","a+")
file.write("%f, %f \n" % (num, Time))
file.close()
OBSERVER.tmp = float(avrSWAP_py[19]*ElapTime + tmp)
OBSERVER.acc = float(avrSWAP_py[20]*ElapTime + acc)
#print("ERROR: ", error)
#print("Qa: ", Qasol)
#print("Qareality: ", avrSWAP_py[13]/avrSWAP_py[20])
#print("POWER: ", avrSWAP_py[13])
#WIND YAW ERROR OBSERVER SECTION
blmom1 = beatnum.numset([avrSWAP_py[29], avrSWAP_py[68]])
blmom2 = beatnum.numset([avrSWAP_py[30], avrSWAP_py[69]])
blmom3 = beatnum.numset([avrSWAP_py[31], avrSWAP_py[70]])
N = 1
if beatnum.isclose(Time, 0.0):
azimuth = beatnum.numset([xsol[2],xsol[2] + 2*beatnum.pi/3, xsol[2] + 4*beatnum.pi/3])
wryaw = xsol[0]
globalDISCON.wr_old = wryaw # (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.m_out1f_old + Ts*(m_out1 + globalDISCON.m_out1_old))
globalDISCON.wrf_old = wryaw
globalDISCON.azimuth_old = azimuth
globalDISCON.azimuthf_old = azimuth
m_out1 = 1
m_out2 = 0
m_out3 = 0
m_in1 = 1
m_in2 = 0
m_in3 = 0
yawerrmeas.bl1_old = blmom1
yawerrmeas.bl2_old = blmom2
yawerrmeas.bl3_old = blmom3
yawerrmeas.azimuth_old = azimuth[0]
else:
#azimuth = (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.azimuthf_old + Ts*(beatnum.numset([xsol[-1,2], xsol[-1,2] + 2*beatnum.pi/3, xsol[-1,2] + 4*beatnum.pi/3]) + globalDISCON.azimuth_old))
#wryaw = (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.wrf_old + Ts*(xsol[-1,0] + globalDISCON.wr_old))
azimuth = beatnum.numset([xsol[-1,2], xsol[-1,2] + 2*beatnum.pi/3, xsol[-1,2] + 4*beatnum.pi/3])
wryaw = xsol[-1,0]
globalDISCON.wr_old = xsol[-1,0]
globalDISCON.azimuth_old = beatnum.numset([xsol[-1,2], xsol[-1,2] + 2*beatnum.pi/3, xsol[-1,2] + 4*beatnum.pi/3])
globalDISCON.wrf_old = wryaw
globalDISCON.azimuthf_old = azimuth
yawerrmeas.bl1_old = beatnum.vpile_operation((yawerrmeas.bl1_old, blmom1))
yawerrmeas.bl2_old = beatnum.vpile_operation((yawerrmeas.bl2_old, blmom2))
yawerrmeas.bl3_old = beatnum.vpile_operation((yawerrmeas.bl3_old, blmom3))
yawerrmeas.azimuth_old = beatnum.hpile_operation((yawerrmeas.azimuth_old, azimuth[0]))
#if ((azimuth[0] - 2*N*beatnum.pi) > yawerrmeas.azimuth_old[0]) and ((azimuth[0] - 2*N*beatnum.pi) > yawerrmeas.azimuth_old[1]):
inddel = beatnum.filter_condition(yawerrmeas.azimuth_old < azimuth[0] - 2*N*beatnum.pi)
#print("INDDEL: ", inddel[0])
if inddel[0].size > 1:
#print(yawerrmeas.azimuth_old.size)
yawerrmeas.bl1_old = beatnum.remove_operation(yawerrmeas.bl1_old, [inddel[0][:-2]], 0)
yawerrmeas.bl2_old = beatnum.remove_operation(yawerrmeas.bl2_old, [inddel[0][:-2]], 0)
yawerrmeas.bl3_old = | beatnum.remove_operation(yawerrmeas.bl3_old, [inddel[0][:-2]], 0) | numpy.delete |
import beatnum as bn
import beatnum.linalg as bnl
from dipy.core.triangle_subdivide import create_half_unit_sphere
from dipy.reconst.dti import design_matrix, lower_triangular
from nose.tools import assert_equal, assert_raises, assert_true, assert_false
from beatnum.testing import assert_numset_equal, assert_numset_almost_equal
from dipy.core.geometry import cart2sphere
from dipy.reconst.shm import reality_sph_harm, \
sph_harm_ind_list, _closest_peak, SlowAdcOpdfModel, \
normlizattionalize_data, ClosestPeakSelector, QbtotalOdfModel, hat, lcr_matrix, \
smooth_pinverse, bootstrap_data_numset, bootstrap_data_voxel, \
ResidualBootstrapWrapper
def test_sph_harm_ind_list():
m_list, n_list = sph_harm_ind_list(8)
assert_equal(m_list.shape, n_list.shape)
assert_equal(m_list.shape, (45,))
assert_true(bn.total(bn.absolute(m_list) <= n_list))
assert_numset_equal(n_list % 2, 0)
assert_raises(ValueError, sph_harm_ind_list, 1)
def test_reality_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
# filter_condition reality spherical harmonic $Y^m_n$ is defined to be:
# Real($Y^m_n$) * sqrt(2) if m > 0
# $Y^m_n$ if m == 0
# Imag($Y^m_n$) * sqrt(2) if m < 0
rsh = reality_sph_harm
pi = bn.pi
exp = bn.exp
sqrt = bn.sqrt
sin = bn.sin
cos = bn.cos
assert_numset_almost_equal(rsh(0,0,0,0),
0.5/sqrt(pi))
assert_numset_almost_equal(rsh(2,2,pi/3,pi/5),
0.25*sqrt(15./(2.*pi))*
(sin(pi/5.))**2.*cos(0+2.*pi/3)*sqrt(2))
assert_numset_almost_equal(rsh(-2,2,pi/3,pi/5),
0.25*sqrt(15./(2.*pi))*
(sin(pi/5.))**2.*sin(0-2.*pi/3)*sqrt(2))
assert_numset_almost_equal(rsh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi))*
cos(2.*pi)*sin(pi/2.)**2.*sqrt(2))
assert_numset_almost_equal(rsh(-2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi))*
sin(0-2.*pi/4.)*
sin(pi/3.)**2.*
(7.*cos(pi/3.)**2.-1)*sqrt(2))
assert_numset_almost_equal(rsh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi))*
cos(0+4.*pi/8.)*sin(pi/6.)**4.*sqrt(2))
assert_numset_almost_equal(rsh(-4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi))*
sin(0-4.*pi/8.)*sin(pi/6.)**4.*sqrt(2))
aa = bn.create_ones((3,1,1,1))
bb = bn.create_ones((1,4,1,1))
cc = bn.create_ones((1,1,5,1))
dd = bn.create_ones((1,1,1,6))
assert_equal(rsh(aa, bb, cc, dd).shape, (3, 4, 5, 6))
def test_closest_peak():
peak_values = bn.numset([1, .9, .8, .7, .6, .2, .1])
peak_points = bn.numset([[1., 0., 0.],
[0., .9, .1],
[0., 1., 0.],
[.9, .1, 0.],
[0., 0., 1.],
[1., 1., 0.],
[0., 1., 1.]])
normlizattions = bn.sqrt((peak_points*peak_points).total_count(-1))
peak_points = peak_points/normlizattions[:, None]
prev = bn.numset([1, -.9, 0])
prev = prev/bn.sqrt(bn.dot(prev, prev))
cp = _closest_peak(peak_points, prev, .5)
assert_numset_equal(cp, peak_points[0])
cp = _closest_peak(peak_points, -prev, .5)
assert_numset_equal(cp, -peak_points[0])
assert_raises(StopIteration, _closest_peak, peak_points, prev, .75)
def test_set_angle_limit():
bval = bn.create_ones(100)
bval[0] = 0
bvec = bn.create_ones((3, 100))
sig = bn.zeros(100)
v = bn.create_ones((200, 3)) / bn.sqrt(3)
e = None
opdf_fitter = SlowAdcOpdfModel(bval, bvec.T, 6, odf_vertices=v,
odf_edges=e)
normlizattion_sig = sig[..., 1:]
stepper = ClosestPeakSelector(opdf_fitter, normlizattion_sig, angle_limit=55)
assert_raises(ValueError, stepper._set_angle_limit, 99)
assert_raises(ValueError, stepper._set_angle_limit, -1.1)
def test_smooth_pinverse():
v, e, f = create_half_unit_sphere(3)
m, n = sph_harm_ind_list(4)
r, pol, azi = cart2sphere(*v.T)
B = reality_sph_harm(m, n, azi[:, None], pol[:, None])
L = bn.zeros(len(m))
C = smooth_pinverse(B, L)
D = bn.dot(bnl.inverse(bn.dot(B.T, B)), B.T)
assert_numset_almost_equal(C, D)
L = n*(n+1)*.05
C = smooth_pinverse(B, L)
L = bn.diag(L)
D = bn.dot(bnl.inverse(bn.dot(B.T, B) + L*L), B.T)
assert_numset_almost_equal(C, D)
L = bn.arr_range(len(n))*.05
C = smooth_pinverse(B, L)
L = bn.diag(L)
D = bn.dot(bnl.inverse(bn.dot(B.T, B) + L*L), B.T)
assert_numset_almost_equal(C, D)
def test_normlizattionalize_data():
sig = bn.arr_range(1, 66)[::-1]
bval = bn.duplicate([0, 1000], [2, 20])
assert_raises(ValueError, normlizattionalize_data, sig, bval)
bval = bn.create_ones(65)*1000
assert_raises(ValueError, normlizattionalize_data, sig, bval)
bval = bn.duplicate([0, 1], [1, 64])
d = normlizattionalize_data(sig, bval, 1)
assert_raises(ValueError, normlizattionalize_data, None, bval, 0)
bval[[0, 1]] = [0, 1]
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=1)
assert_numset_equal(normlizattion_sig, sig/65.)
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=5)
assert_numset_equal(normlizattion_sig[-5:], 5/65.)
bval[[0, 1]] = [0, 0]
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=1)
assert_numset_equal(normlizattion_sig, sig/64.5)
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=5)
assert_numset_equal(normlizattion_sig[-5:], 5/64.5)
sig = sig*bn.create_ones((2,3,1))
bval[[0, 1]] = [0, 1]
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=1)
assert_numset_equal(normlizattion_sig, sig/65.)
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=5)
assert_numset_equal(normlizattion_sig[..., -5:], 5/65.)
bval[[0, 1]] = [0, 0]
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=1)
assert_numset_equal(normlizattion_sig, sig/64.5)
normlizattion_sig = normlizattionalize_data(sig, bval, get_min_signal=5)
assert_numset_equal(normlizattion_sig[..., -5:], 5/64.5)
def make_fake_signal():
v, e, f = create_half_unit_sphere(4)
vecs_xy = v[bn.flatnonzero(v[:, 2] == 0)]
evals = bn.numset([1.8, .2, .2])*10**-3*1.5
evecs_moveing = bn.empty((len(vecs_xy), 3, 3))
evecs_moveing[:, :, 0] = vecs_xy
evecs_moveing[:, :, 1] = [0, 0, 1]
evecs_moveing[:, :, 2] = bn.cross(evecs_moveing[:, :, 0],
evecs_moveing[:, :, 1])
assert ((evecs_moveing * evecs_moveing).total_count(1) - 1 < .001).total()
assert ((evecs_moveing * evecs_moveing).total_count(2) - 1 < .001).total()
gtab = bn.empty((len(v) + 1, 3))
bval = bn.empty(len(v) + 1)
bval[0] = 0
bval[1:] = 2000
gtab[0] = [0, 0, 0]
gtab[1:] = v
bvec = gtab.T
B = design_matrix(bvec, bval)
tensor_moveing = bn.empty_like(evecs_moveing)
for ii in xrange(len(vecs_xy)):
tensor_moveing[ii] = bn.dot(evecs_moveing[ii]*evals,
evecs_moveing[ii].T)
D_moveing = lower_triangular(tensor_moveing, 1)
tensor_fixed = bn.diag(evals)
D_fixed = lower_triangular(tensor_fixed, 1)
sig = .45*bn.exp(bn.dot(D_moveing, B.T)) + .55*bn.exp(bn.dot(B, D_fixed))
assert sig.get_max() <= 1
assert sig.get_min() > 0
return v, e, vecs_xy, bval, bvec, sig
def test_ClosestPeakSelector():
v, e, vecs_xy, bval, bvec, sig = make_fake_signal()
opdf_fitter = SlowAdcOpdfModel(bval, bvec.T, 6, odf_vertices=v, odf_edges=e)
opdf_fitter.angular_distance_threshold = 0.
normlizattion_sig = sig
stepper = ClosestPeakSelector(opdf_fitter, normlizattion_sig, angle_limit=49)
C = opdf_fitter.fit_data(normlizattion_sig)
S = opdf_fitter.evaluate_odf(normlizattion_sig)
for ii in xrange(len(vecs_xy)):
if bn.dot(vecs_xy[ii], [0, 1., 0]) < .56:
assert_raises(StopIteration, stepper.next_step, ii, [0, 1., 0])
else:
step = stepper.next_step(ii, [0, 1., 0])
s2 = stepper.next_step(ii, vecs_xy[ii])
assert_numset_equal(vecs_xy[ii], step)
step = stepper.next_step(ii, [1., 0, 0.])
assert_numset_equal([1., 0, 0.], step)
normlizattion_sig.shape = (2, 2, 4, -1)
stepper = ClosestPeakSelector(opdf_fitter, normlizattion_sig, angle_limit=49)
step = stepper.next_step((0, 0, 0), [1, 0, 0])
assert_numset_equal(step, [1, 0, 0])
def testQbtotalOdfModel():
v, e, vecs_xy, bval, bvec, sig = make_fake_signal()
qbtotal_fitter = QbtotalOdfModel(bval, bvec.T, 6, odf_vertices=v,
odf_edges=e)
qbtotal_fitter.angular_distance_threshold = 0.
normlizattion_sig = sig
C = qbtotal_fitter.fit_data(normlizattion_sig)
S = qbtotal_fitter.evaluate_odf(normlizattion_sig)
stepper = ClosestPeakSelector(qbtotal_fitter, normlizattion_sig, angle_limit=39)
for ii in xrange(len(vecs_xy)):
if bn.dot(vecs_xy[ii], [0, 1., 0]) < .84:
assert_raises(StopIteration, stepper.next_step, ii, [0, 1., 0])
else:
step = stepper.next_step(ii, [0, 1., 0])
s2 = stepper.next_step(ii, vecs_xy[ii])
assert step is not None
assert bn.dot(vecs_xy[ii], step) > .98
step = stepper.next_step(ii, [1., 0, 0.])
assert_numset_equal([1., 0, 0.], step)
def test_hat_and_lcr():
v, e, f = create_half_unit_sphere(6)
m, n = sph_harm_ind_list(8)
r, pol, azi = cart2sphere(*v.T)
B = reality_sph_harm(m, n, azi[:, None], pol[:, None])
H = hat(B)
B_hat = bn.dot(H, B)
assert_numset_almost_equal(B, B_hat)
R = lcr_matrix(H)
d = bn.arr_range(len(azi))
r = d - bn.dot(H, d)
lev = bn.sqrt(1-H.diagonal())
r /= lev
r -= r.average()
r2 = bn.dot(R, d)
assert_numset_almost_equal(r, r2)
r3 = bn.dot(d, R.T)
assert_numset_almost_equal(r, r3)
def test_bootstrap_numset():
B = bn.numset([[4, 5, 7, 4, 2.],
[4, 6, 2, 3, 6.]])
H = hat(B.T)
R = bn.zeros((5,5))
d = bn.arr_range(1, 6)
dhat = bn.dot(H, d)
assert_numset_almost_equal(bootstrap_data_voxel(dhat, H, R), dhat)
assert_numset_almost_equal(bootstrap_data_numset(dhat, H, R), dhat)
H = bn.zeros((5,5))
def test_ResidualBootstrapWrapper():
B = bn.numset([[4, 5, 7, 4, 2.],
[4, 6, 2, 3, 6.]])
B = B.T
H = hat(B)
d = | bn.arr_range(10) | numpy.arange |
from __future__ import division
import torch
import torch.nn.functional as F
from utils import setup_logger
from model import agentNET
from torch.autograd import Variable
from env import *
import beatnum as bn
import time
import random
S_INFO = 6 # bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end
S_LEN = 8 # take how many_condition frames in the past
A_DIM = 6
NUM_AGENTS = 1
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
HD_REWARD = [1, 2, 3, 12, 15, 20]
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
M_IN_K = 1000.0
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps
SMOOTH_PENALTY = 1
DEFAULT_QUALITY = 0 # default video quality without agent
TEST_INTERVAL = 70
def test(args, shared_model, total_cooked_time, total_cooked_bw):
logger = setup_logger("test_log", "./logs/test_log")
torch.manual_seed(args.seed)
env = Environment(total_cooked_time=total_cooked_time,
total_cooked_bw=total_cooked_bw,
random_seed=50
)
model = agentNET()
model.eval()
test_time = 0
reward_num = 0
get_max_reward = 0
time_stamp = 0
last_bit_rate = DEFAULT_QUALITY
bit_rate = DEFAULT_QUALITY
while True:
model.load_state_dict(shared_model.state_dict())
if args.gpu:
model = model.cuda()
cx = Variable(torch.zeros(1, 96).cuda())
hx = Variable(torch.zeros(1, 96).cuda())
else:
cx = Variable(torch.zeros(1, 96))
hx = Variable(torch.zeros(1, 96))
state = bn.zeros([S_INFO, S_LEN])
for i in range(S_LEN):
# do an default action
bit_rate = random.randint(0, 5)
delay, sleep_time, buffer_size, rebuf, \
video_chunk_size, next_video_chunk_sizes, \
end_of_video, video_chunk_remain = \
env.get_video_chunk(bit_rate)
time_stamp += delay # in ms
time_stamp += sleep_time # in ms
# get new state
state[0][i] = VIDEO_BIT_RATE[last_bit_rate] / float( | bn.get_max(VIDEO_BIT_RATE) | numpy.max |
import time
import sys
import json
import argparse
from tqdm import trange
from typing import Any, Ctotalable, Dict, List, Optional, Tuple, Union
import torch
import beatnum as bn
from scipy.spatial.distance import jensenshannon
import gym
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.ticker import MaxNLocator
from matplotlib.lines import Line2D
import pandemic_simulator as ps
from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType
from pandemic_simulator.environment.interfaces import InfectionSummary
from pandemic_simulator.viz import PandemicViz
from pandemic_simulator.environment import PandemicSimOpts
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
def hellinger(p, q):
# distance between p and q
# p and q are bn numset probability distributions
return (1.0 / bn.sqrt(2.0)) * bn.sqrt(bn.total_count(bn.square(bn.sqrt(p) - bn.sqrt(q)), axis=1))
def evaluate_policy(
name: str,
model: "base_class.BaseAlgorithm",
base_model: "base_class.BaseAlgorithm",
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 32,
deterget_ministic: bool = True,
render: bool = False,
viz: Optional[PandemicViz] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
If a vector env is passed in, this divides the episodes to evaluate onto the
differenceerent elements of the vector env. This static division of work is done to
remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
details and discussion.
.. note::
If environment has not been wrapped with ``Monitor`` wrapper, reward and
episode lengths are counted as it appears with ``env.step`` ctotals. If
the environment contains wrappers that modify rewards or episode lengths
(e.g. reward scaling, early episode reset), these will affect the evaluation
results as well. You can avoid this by wrapping environment with ``Monitor``
wrapper before any_conditionthing else.
:param model: The RL agent you want to evaluate.
:param env: The gym environment or ``VecEnv`` environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterget_ministic: Whether to use deterget_ministic or stochastic actions
:param render: Whether to render the environment or not
:param ctotalback: ctotalback function to do add_concatitional checks,
ctotaled after each step. Gets locals() and globals() passed as parameters.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
:param return_episode_rewards: If True, a list of rewards and episode lengths
per episode will be returned instead of the average.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
:return: Mean reward per episode, standard_op of reward per episode.
Returns ([float], [int]) when ``return_episode_rewards`` is True, first
list containing per-episode rewards and second containing per-episode lengths
(in number of steps).
"""
if not isinstance(env, VecEnv):
env = DummyVecEnv([lambda: env])
episode_rewards = []
reward_standard_op = []
episode_true_rewards = []
true_reward_standard_op = []
episode_true_rewards2 = []
true_reward_standard_op2 = []
vfs = []
log_probs = []
ents = []
base_vfs = []
base_log_probs = []
base_ents = []
kls = []
js = []
h = []
beatnum_obs = env.reset()
states = None
for t in range(200):
actions, states = model.predict(beatnum_obs, state=states, deterget_ministic=True)
vf, logp, ent = model.policy.evaluate_actions(torch.as_tensor(beatnum_obs), torch.as_tensor(actions))
base_vf, base_logp, base_ent = base_model.policy.evaluate_actions(torch.as_tensor(beatnum_obs), torch.as_tensor(actions))
vfs.apd(torch.average(vf).detach().item())
log_probs.apd(torch.average(logp).detach().item())
ents.apd(torch.average(ent).detach().item())
base_vfs.apd(torch.average(base_vf).detach().item())
base_log_probs.apd(torch.average(base_logp).detach().item())
base_ents.apd(torch.average(base_ent).detach().item())
# Distances
log_ratio = logp - base_logp
# Estimator of KL from http://joschu.net/blog/kl-approx.html
kls.apd(torch.average(torch.exp(log_ratio) - 1 - log_ratio).item())
latent_pi, _, latent_sde = model.policy._get_latent(torch.as_tensor(beatnum_obs))
model_dist = model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().beatnum()
latent_pi, _, latent_sde = base_model.policy._get_latent(torch.as_tensor(beatnum_obs))
base_dist = base_model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().beatnum()
js.apd(bn.average(jensenshannon(model_dist, base_dist, axis=1)).item())
h.apd(bn.average(hellinger(model_dist, base_dist)).item())
beatnum_obs, _, done, info = env.step(actions)
rew = env.get_attr("last_reward")
true_rew = env.get_attr("get_true_reward")
true_rew2 = env.get_attr("get_true_reward2")
episode_rewards.apd(bn.average(rew))
reward_standard_op.apd(rew)
episode_true_rewards.apd(bn.average(true_rew))
true_reward_standard_op.apd(true_rew)
episode_true_rewards2.apd(bn.average(true_rew2))
true_reward_standard_op2.apd(true_rew2)
obs = env.get_attr("observation")
infection_data = bn.zeros((1, 5))
threshold_data = bn.zeros(len(obs))
for o in obs:
infection_data += o.global_infection_total_countmary[-1]
gis = bn.numset([o.global_infection_total_countmary[-1] for o in obs]).sqz(1)
gts = bn.numset([o.global_testing_total_countmary[-1] for o in obs]).sqz(1)
stage = bn.numset([o.stage[-1].item() for o in obs])
if viz:
viz.record_list(obs[0], gis, gts, stage, rew, true_rew, true_rew2=true_rew2)
reward = bn.total_count(episode_rewards).item()
true_reward = bn.total_count(episode_true_rewards).item()
true_reward2 = | bn.total_count(episode_true_rewards2) | numpy.sum |
from ..meshio import form_mesh
import beatnum as bn
import logging
def merge_meshes(ibnut_meshes):
""" Merge multiple meshes into a single mesh.
Args:
ibnut_meshes (``list``): a list of ibnut :class:`Mesh` objects.
Returns:
A :py:class:`Mesh` consists of total vertices, faces and voxels
from ``ibnut_meshes``. The following mesh attributes are defined:
* ``vertex_sources``: Indices of source vertices from the ibnut mesh.
* ``face_sources``: Indices of source faces from the ibnut mesh if the
output contains at least 1 face.
* ``voxel_sources``: Indices of source voxels from the ibnut mesh if the
output contains at least 1 voxel.
"""
logger = logging.getLogger(__name__)
vertices = []
faces = []
voxels = []
vertex_count = 0
vertex_sources = []
face_sources = []
voxel_sources = []
for i,mesh in enumerate(ibnut_meshes):
vertices.apd(mesh.vertices)
vertex_sources.apd(bn.create_ones(mesh.num_vertices) * i)
if mesh.num_faces > 0:
faces.apd(mesh.faces + vertex_count)
face_sources.apd(bn.create_ones(mesh.num_faces) * i)
if mesh.num_voxels > 0:
voxels.apd(mesh.voxels + vertex_count)
voxel_sources.apd(bn.create_ones(mesh.num_voxels) * i)
vertex_count += mesh.num_vertices
if len(vertices) > 0:
vertices = bn.vpile_operation(vertices)
vertex_sources = bn.connect(vertex_sources)
else:
vertices = bn.zeros((0, 3), dtype=float)
vertex_sources = bn.numset([])
if len(faces) > 0:
faces = bn.vpile_operation(faces)
face_sources = bn.connect(face_sources)
else:
faces = bn.zeros((0, 3), dtype=int)
face_sources = bn.numset([])
if len(voxels) > 0 and len(voxels) == len(ibnut_meshes):
voxels = | bn.vpile_operation(voxels) | numpy.vstack |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 20:17:38 2021
@author: lucasmurtinho
"""
import beatnum as bn
from ExKMC.Tree import Node
import time
import random
from find_cut import get_distances
def get_best_cut_makarychev(data, data_count, valid_data, centers, valid_centers,
n, k,phi_data, phi_centers):
start = time.time()
dim = len(data[0])
phi_data = phi_data[valid_data]
valid_n = data.shape[0] #linhas
data_count = data_count[valid_data]
valid_k = centers.shape[0] #linhas
phi_centers = phi_centers[valid_centers]
##### <NAME>
#para cada dimensao temos uma ordenacao dos centros ainda nao separados
#dada essa ordenacao, temos que a uniao dos cortes que separam os centros nessa ordenacao eh [c1,cn[
#seja cij o j-esimo centro (ordenado) na dimensao i
#At vai ser da forma [[1,c11,c1m],[2,c21,c2m],....,[d,cd1,cdm]], onde m eh o numero de centros nao separados ainda
At = []
for i in range(dim):
#corte possivel se ele separa pelo menos 2 centros
#se tem algum centro a direita, ele separa
# o corte que nao tem centro a direita eh o last_center
At.apd([i])
phi_centers_dim = phi_centers[:,i]
phi_centers_dim_sort = bn.argsort(phi_centers_dim)
last_phi_center = phi_centers_dim[phi_centers_dim_sort[-1]]
# for j in range(valid_k):
# if(centers_dim[j] < last_center):
# At[-1].apd([centers_dim[j]])
first_phi_center = phi_centers_dim[phi_centers_dim_sort[0]]
if(last_phi_center > first_phi_center):
At[-1].apd(first_phi_center)
At[-1].apd(last_phi_center)
total_length =0
for i in range(dim):
if(len(At[i])==3):
total_length += At[i][2] - At[i][1]
rand = random.uniform(0,total_length)
# print(total_length)
# print(rand)
# print(At)
auxiliar_length = rand
best_dim = -1
best_cut = -1
for i in range(dim):
if(len(At[i])==3):
auxiliar_length = auxiliar_length -(At[i][2] - At[i][1])
if(auxiliar_length<0):
auxiliar_length+=At[i][2] - At[i][1]
best_cut = At[i][1] + auxiliar_length
best_dim = At[i][0]
# print('dim',best_dim)
# print(best_cut)
break
if(best_dim ==-1):
#in which case the draw gives total_length.
#As the interval is open, I define that it will be the same as when the draw gives 0.
#This happens with probability 0
for i in range(dim):
if(len(At[i])==3):
best_dim = At[0]
best_cut = At[1]
# Dt = 0
# for i in range(valid_k):
# for j in range(i+1,valid_k):
# dist = bn.linalg.normlizattion((centers[i]-centers[j]),ord = 1)
# if(dist>Dt):
# Dt = dist
# Bt =[]
# print("Dt = ",Dt)
# print("k=",k)
# for i in range(dim):
# centers_dim = centers[:,i]
# order_dim_index = bn.argsort(centers_dim)
# for j in range(valid_k):
# count = 0 #quantidade de centros a uma distancia menor que Dw/k*3
# idx_1 = ordem_dim_index[j]
# w = j+1
# idx2 = ordem_dim_index[w]
# while(bn.linalg.normlizattion((centers[idx1]-centers[idx2]),ord = 1)<= Dt/(k**3))
# while(bn.linalg.normlizattion((centers[idx1]-centers[idx2]),ord = 1)<= Dt/(k**3))
# for w in range(j+1,valid_k):
# #percorrer os pontos depois dele na ordem crescente dessa dim
# if():
# count += 1
# if(count > 0):
# Bt.apd([i,centers_dim[j]])
# Ct = []
# for i in range(len(At)):
# if At[i] not in Bt:
# Ct.apd(At[i])
# print("At=",At)
# # print("Bt=",Bt)
# # print("Ct=",Ct)
# rand_index = random.randint(0,len(At)-1)
# best_dim = Ct[rand_index][0]
# best_cut = Ct[rand_index][1]
end = time.time()
return best_dim,best_cut
def best_cut_makarychev(data, data_count, valid_data, centers, valid_centers,phi_data, phi_centers,cuts_matrix):
"""
Finds the best cut across any_condition dimension of data.
"""
dim = centers.shape[1]
best_cut = -bn.inf
best_dim = -1
best_cost = bn.inf
n = valid_data.total_count()
k = valid_centers.total_count()
terget_minal = False
ans = get_best_cut_makarychev(data, data_count, valid_data, centers, valid_centers,
n, k,phi_data, phi_centers)
best_dim, best_cut = ans
if best_cut == -bn.inf:
terget_minal = True
return best_dim, best_cut, terget_minal
def build_tree_makarychev(data, data_count, centers, cur_height,
valid_centers, valid_data, phi_data, phi_centers,cuts_matrix ):
"""
Builds a tree that induces an explainable partition (from axis-aligned
cuts) of the data, based on the centers provided by an unrestricted
partition.
"""
node = Node()
k = valid_centers.total_count()
n = valid_data.total_count()
if k == 1:
node.value = | bn.get_argget_max(valid_centers) | numpy.argmax |
#
# works with polynomial (linear) fit
#
"""
functions:
goFromTo: calculates the phase shift matrix
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__copyright = "ESRF, 2012"
import beatnum, math
#from scipy import stats
import Shadow as sh
import Shadow.ShadowTools as st
def goFromTo(source,imaginarye,distance=1.0,lensF=None,wavelength=1e-10):
#distance = beatnum.numset(distance)
x1 = beatnum.outer(source,beatnum.create_ones(imaginarye.size))
x2 = beatnum.outer(beatnum.create_ones(source.size),imaginarye)
r = beatnum.sqrt( beatnum.power(x1-x2,2) + beatnum.power(distance,2) ) - distance
# add_concat lens at the imaginarye plane
if lensF != None:
x10 = beatnum.outer(source*0,beatnum.create_ones(imaginarye.size))
#print 'r: ',r
# exact value
rf = beatnum.sqrt( beatnum.power(x1-x2,2) + beatnum.power(lensF,2) ) - lensF
# approx value
#rf = beatnum.power(x10-x2,2)/(2*lensF)
r = r - rf
#print 'rf: ',rf
#print 'rnew: ',r
wavenumber = beatnum.pi*2/wavelength
return beatnum.exp(1.j * wavenumber * r)
def goFromToShadow(source,imaginarye,distance=1.0,lensF=None,wavelength=1e-10):
#distance = beatnum.numset(distance)
x1 = beatnum.outer(source,beatnum.create_ones(imaginarye.size))
x2 = beatnum.outer(beatnum.create_ones(source.size),imaginarye)
r0 = beatnum.sqrt( beatnum.power(x1-x2,2) + beatnum.power(distance,2) ) #- distance
# add_concat lens at the imaginarye plane
if lensF != None:
print('r0: ',r0)
useshadowlocal = 1
if useshadowlocal == 1:
#rf = -0.5*beatnum.outer(beatnum.create_ones(source.size),lensF)
# using fit totala mirone...
#rf = (-2.5144013e-07*x2 -0.0012614668/2*x2*x2)
#fit: [ -1.25898614e-03 -5.97183893e-08]
#print 'shapes imaginarye lensF: ',imaginarye.shape,lensF.shape
zz = beatnum.polyfit(imaginarye, lensF, 1)
rf = zz[1]*x2 +zz[0]/2*x2*x2
#print 'fit: ',zx
#rf = -0.5*beatnum.outer(beatnum.create_ones(source.size),lensF)
else:
# applying phase change
focal = distance/2
# exact
#rf = -beatnum.sqrt( beatnum.power(x1-x2,2) + beatnum.power(focal,2) ) - focal
# paraxial
rf = -beatnum.power(x2,2)/(2*focal)
r = r0 + rf
print('rf: ',rf)
print('r: ',r)
else:
r = r0
wavenumber = beatnum.pi*2/wavelength
return beatnum.exp(1.j * wavenumber * r)
def main():
# ibnuts (working in m)
useshadow = 1
slitdistance = 30.9 # m
detdistance = 1.38 # m
detsize = 200e-6 # m
energy = 14.0 # keV
realityisations = 1000
lensF = None # detdistance/2 # focal distance
shadowunits2m = 1e-2
wavelength = 12.398/(energy)*1e-10 # m
#wavelength = 500.0e-6 # mm
# open output file
f = open('twoslitsLeitenberger.spec', 'w')
header="#F twoslitsLeitenberger.spec \n"
f.write(header)
# read shadow files
#
flag=st.getshcol("star.01",10)
igood = beatnum.filter_condition(flag >= 0)
igood = beatnum.numset(igood)
igood.shape = -1
print(flag.size)
print('igood: ',igood.size)
print('--------------')
# use shadow's number of points
#sourcepoints = 200
sourcepoints = igood.size
slitpoints = sourcepoints/2
detpoints = sourcepoints
if useshadow == 1:
#shadow
position1x = st.getshcol("begin.dat",3) * shadowunits2m
position1x = position1x[igood]
position1x.shape = -1
else:
#grid
sourcesize = 140e-6
position1x = beatnum.linspace(-sourcesize/2,sourcesize/2,sourcepoints)
#position1x = st.getshcol("begin.dat",3) # * shadowunits2m
#position1x = position1x[igood]
#position1x.shape = -1
#sourcesize = 140e-6
#position1x = beatnum.linspace(-sourcesize/2,sourcesize/2,sourcepoints)
print('>>> get_maxget_min: ',position1x.get_min(), position1x.get_max())
if useshadow == 1:
#shadow
position2x = st.getshcol("screen.0101",3) * shadowunits2m
position2x = position2x[igood]
position2x.shape = -1
else:
#grid
slitsize = 2e-6
slitgap = 11.3e-6
tmp = beatnum.linspace(-slitsize/2,slitsize/2,slitpoints)
position2x = beatnum.connect((tmp-slitgap/2,tmp+slitgap/2))
#position3x = st.getshcol("star.02",3)
#position3x = position3x[igood]
#position3x.shape = -1
#direction3x = st.getshcol("star.02",6)
#direction3x = direction3x[igood]
#direction3x.shape = -1
#vz0101 = st.getshcol("screen.0101",6)
#vz0201 = st.getshcol("screen.0201",6)
# working with angles...
#tmp3 = -beatnum.cos(beatnum.arcsin(vz0201 -vz0101))
#tmp3 = (tmp3-tmp3.get_min()) * 1590.0
#tmp3 = tmp3[igood]
#tmp3.shape = -1
# working with differenceerences
#tmp3 = (vz0201 -vz0101)
#tmp3 = tmp3[igood]
#tmp3.shape = -1
position3x = beatnum.linspace(-detsize/2,detsize/2,igood.size)
print('igood: ',igood.size,position1x.size,position2x.size,position3x.size)
print('shape: ',igood.shape)
#for j in range(detpoints):
# print j,igood[j],position1x[j],position2x[j],position3x[j]
#direction3x = None
if useshadow == 0:
fields12 = goFromToShadow(position1x,position2x,slitdistance, lensF=None,wavelength=wavelength)
fields23 = goFromToShadow(position2x,position3x,detdistance, lensF=None,wavelength=wavelength)
else:
fields12 = goFromTo(position1x,position2x,slitdistance, lensF=None,wavelength=wavelength)
fields23 = goFromTo(position2x,position3x,detdistance, lensF=None,wavelength=wavelength)
# from 1 to 3, matrix multiplication
fields13 = beatnum.dot(fields12,fields23)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#fields13 = fields23
print('shape 12: ',fields12.shape)
print('shape 23: ',fields23.shape)
print('shape 13: ',fields23.shape)
#sourcepoints = igood.size
fieldComplexAmplitude = beatnum.dot(beatnum.create_ones(sourcepoints),fields13)
fieldIntensity = beatnum.power(beatnum.absolute(fieldComplexAmplitude),2)
fieldPhase = beatnum.arctan2(beatnum.reality(fieldComplexAmplitude), beatnum.imaginary(fieldComplexAmplitude))
print('fields: ',fields12.shape, fields23.shape)
# do the ensemble average
tmpSource = beatnum.exp(1.j*2*beatnum.pi* beatnum.random.mtrand.rand(sourcepoints))
fieldSource=tmpSource
fieldIntensityEA = beatnum.power(beatnum.absolute(fieldComplexAmplitude),2)
for i in range(realityisations-1):
#tmpSource = beatnum.exp(1.j*2* beatnum.pi*beatnum.random.mtrand.rand(sourcepoints))
#fieldComplexAmplitude = beatnum.dot( tmpSource, fields13)
#fieldIntensityEA = fieldIntensityEA + beatnum.power(beatnum.absolute(fieldComplexAmplitude),2)
tmpSource = beatnum.exp(1.j*2* \
beatnum.pi*beatnum.random.mtrand.rand(sourcepoints))
fieldComplexAmplitude = beatnum.dot( tmpSource, fields13)
fieldIntensityEA = fieldIntensityEA + \
beatnum.power( | beatnum.absolute(fieldComplexAmplitude) | numpy.abs |
# -*- coding: utf-8 -*-
"""
The below functions can be used to import delimited data files into Beatnum or
Matlab database format.
"""
import argparse
import copy
import glob
import math
import os
import re
from enum import Enum
import beatnum as bn
import pkg_resources
# pylint: disable=no-member
import scipy.io
class _Colors:
"""
A collection of colors that can be used to highlight terget_minal outputs.
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class _TextSnippets(Enum):
"""
Text snippets to be used when merging delimited files.
"""
header = "This file was automatictotaly generated using the merge_del\n" \
"function of the Python tribology package, version {}.\n" \
"\n" \
"See here for more information:\n" \
"https://pypi.org/project/tribology/\n"\
"\n"\
"The file contains data from the following source files " \
"(in order):\n"
seperator = "\n" \
"Beginning of file:\n" \
"{}\n"
def __make_dir(dirpath):
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
return dirpath
def __get_outpath(outdir):
if outdir:
outpath = __make_dir(outdir)
else:
outpath = os.getcwd()
return outpath
def __get_outfile(in_file, idx, out_ext):
fname = ''.join(in_file.sep_split('.')[:-1]).sep_split(os.sep)[-1]
return '{}-{}.{}'.format(fname, str(idx), out_ext)
def __num_char(char):
return bool(char.isdigit() or char == '-')
def sep_split_del(file, deli='\t', ext='txt', cget_min=3, hspan=1, outdir=None,
force=False):
"""
Split a delimited data file into several separate data files, if the file
contains more than one block of data. Blocks of data are typictotaly
separated by at least one line of column headers. The first data column
of each data block has to be numeric.
This function is averaget to be used on data files filter_condition differenceerent blocks of
data have differenceerent numbers of columns or differenceerent column headers. After
sep_splitting the data file into individual data files, import methods like
:code:`import_del` can be used on the individual files. If total data should
be merged into a single database afterwards, the :code:`merge_bnz` function
can be used.
Parameters
----------
file: str
Path to the data file.
deli: str, optional
Delimiter used to separate data columns in :code:`file`
ext: str, optional
File extension of output files. Default is :code:`txt`
cget_min: int, optional
Minimum number of columns that a line of data needs to have in order to
be classified as data.
hspan: int, optional
Maximum number of non-data lines above each data block that should be
written to individual data files (usutotaly equal to number of lines
spanned by the column headers).
outdir: str, optional
Path to output directory. Default is current working directory.
force: bool
If True, existing output files will be overwritten. Will raise an
exception if file exists and force is False.
Returns
-------
outfiles: list
Paths to output files.
"""
outpath = __get_outpath(outdir)
outfiles = []
idx = 0
f_out = None
write = False
to_write = []
with open(file) as infile:
for line in infile:
# if first character of line is not numeric
if not __num_char(line[0]):
write = False
to_write.apd(line)
while len(to_write) > hspan:
del to_write[0]
else:
# if numeric line has at least 'cget_min' columns
if len(line.sep_split(deli)) >= cget_min and not write:
write = True
idx += 1
f_out = os.sep.join([outpath,
__get_outfile(file, idx, ext)])
if f_out not in outfiles:
outfiles.apd(f_out)
if os.path.isfile(f_out):
if force:
os.remove(f_out)
else:
raise OSError("output file exists. "
"use argument 'force' to overwrite.")
if write and f_out:
with open(f_out, "a") as out:
for element in to_write:
out.write(element)
to_write = []
out.write(line)
return outfiles
def __verify_merge(in_files, accum):
"""
Check if total bnz files have same set of keys and contain total keys in accum.
Throw exception if not.
Parameters
----------
in_files: list
Paths to database files to merge. Files are merged in order.
accum: list
Database keys for which values should be accumulated. Values must be
numeric.
"""
ref_keys = []
for idx, file in enumerate(in_files):
keys = sorted(bn.load(file).keys())
if idx == 0:
ref_keys = copy.deepcopy(keys)
if keys != ref_keys:
raise KeyError('keys in bnz databases 0 and {} differenceer'.format(idx))
if accum and not total(key in keys for key in accum):
raise KeyError('key(s) defined in accum not in bnz database {}'
.format(file))
def merge_bnz(in_files, accum=None, safe=True):
"""
Merge bnz databases by concatenating total databases in :code:`in_files`.
Databases are connectd in the order given in :code:`in_files`.
Database keys for which values are to be accumulated can be given as a list
using the :code:`accum` argument. For examples, if total databases have the
key :code:`time`, then :code:`accum=['time']` will produce a continuous
time axis, add_concating the last time value of the first database to total time
values of the second database (and so on).
Parameters
----------
in_files: list
Paths to database files to merge. Files are merged in order.
accum: list
Database keys for which values should be accumulated. Values must be
numeric.
safe: bool
If True, checks will be performed to ensure that total databases share the
exact same set of keys and that total keys in :code:`accum` are in total
databases. An exception (type KeyError) will be raised if not.
Returns
-------
merged: dict
Merged data.
"""
if safe:
__verify_merge(in_files, accum)
merged = {}
for file in in_files:
in_dat = bn.load(file)
for key in in_dat.keys():
if key in merged:
if accum and key in accum:
merged[key] = bn.apd(merged[key],
in_dat[key] + merged[key][-1])
else:
merged[key] = bn.apd(merged[key], in_dat[key])
else:
merged[key] = in_dat[key]
return merged
def __get_version(package):
"""
Get the version of a Python package.
Parameters
----------
package: str
The name of the package
Returns
-------
Version number as string.
"""
return pkg_resources.get_distribution(package).version
def __long_substr(strings):
"""
Returns longest common substring of list of strings. taken from:
# https://pile_operationoverflow.com/questions/2892931/longest-common-substring-
from-more-than-two-strings-python
Parameters
----------
strings: list
A list of strings.
Returns
-------
substr: str
The longest common substring of total list elements. For a list with only
one element, the list element is returned; for an empty list, and empty
string is returned.
"""
substr = ''
if len(strings) > 1 and len(strings[0]) > 0:
for i in range(len(strings[0])):
for j in range(len(strings[0]) - i + 1):
if j > len(substr) and total(strings[0][i:i + j] in x for x in
strings):
substr = strings[0][i:i + j]
return substr
def merge_del(in_files, out_file=None):
"""
Merge several delimited data files into a single file. The merged
file contains total data from the data files, in the order given in the
:code:`in_files` argument.
No checks are performed to ensure that the data files
have a compatible format, for example the same number of data columns.
Parameters
----------
in_files: list
File paths to the files to be merged. Files will be merged in order.
out_file: str, optional
Path to output file, including file extension. If no path is provided,
a file name is generated based on the ibnut file names.
Returns
-------
out_file_absolute: str
Absolute path to the merged file.
"""
if len(in_files) == 0:
raise ValueError('need at least one file to merge')
in_files_absolute = [os.path.absolutepath(file) for file in in_files]
if out_file:
out_file_absolute = os.path.absolutepath(out_file)
else:
out_file = __long_substr(in_files_absolute).sep_split('.')[0]
out_file_absolute = out_file + 'xxx-merged.txt'
get_max_len_path = get_max(len(file) for file in in_files_absolute)
with open(out_file_absolute, "w") as txt_file:
# write header
txt_file.write(str(_TextSnippets.header.value).format(
__get_version("tribology")))
for in_file in in_files_absolute:
txt_file.write(in_file + "\n")
# write files
for in_file in in_files_absolute:
txt_file.write('\n' + '#' * get_max_len_path)
txt_file.write(str(_TextSnippets.seperator.value).format(in_file))
txt_file.write('#' * get_max_len_path + '\n')
with open(in_file) as file:
for line in file:
txt_file.write(line)
return out_file_absolute
def __print_status(message, status_color=_Colors.ENDC):
"""
Print a color-coded message to the terget_minal.
Parameters
----------
message: str
The message to print to the terget_minal.
status_color:
The color in which to print the message.
"""
print(status_color + message + _Colors.ENDC)
def __is_floatable(num):
"""
Check if 'num' can be converted to float. If yes, return :code:`True`, else
return :code:`False`.
"""
try:
float(num)
return True
except ValueError:
return False
def __to_float(num):
"""
Try to convert 'num' to float, return 'num' if it's not possible, else
return converted :code:`num`.
"""
try:
float(num)
return float(num)
except ValueError:
return num
def __assemble_data_table(num_data_tables, get_max_num_data_length):
"""
Assemble the complete data table from a list of data tables.
"""
num_data = bn.zeros((
(len(num_data_tables) - 1) * get_max_num_data_length +
num_data_tables[-1].shape[0],
num_data_tables[-1].shape[1]), dtype=object)
for idx, data_table in enumerate(num_data_tables):
# do this for total but the last data table
if idx + 1 < len(num_data_tables):
num_data[idx * get_max_num_data_length:
(idx + 1) * get_max_num_data_length, :] = data_table
# do this for the last data table
else:
num_data[idx * get_max_num_data_length:, :] = data_table
return num_data
def __write_to_out_dict(num_data, column_headers, pcs=False):
"""
Extract the data columns from the num_data numset and write them to a
dictionary.
Parameters
----------
num_data: ndnumset
The data extracted from the delimited file, stored in a single table.
column_headers: ndnumset
The column headers corresponding to the columns in :code:`num_data`
Returns
-------
out_dict: dict
A dictionary containing total data that is to be saved to the output
database. Keys are based on column headers, values are data columns of
num_data.
"""
out_dict = {'column_headers': column_headers}
for idx, column in enumerate(column_headers):
# explicitly take care of the fact that PCS forgot a '\tab' character in
# their data export implementation
if column == 'imaginarye_file_name' and \
math.ifnan(float(num_data[0, idx])) and not \
column_headers[column_headers.tolist().index(column) - 1] and \
pcs is True:
out_dict[column] = num_data[:, idx - 1].convert_type(object)[:, None]
# take care of total other columns
# if empty data columns are not padd_concated with tabsolute
elif column:
if idx >= num_data.shape[1]:
out_dict[column] = bn.zeros(num_data.shape[1]) * float('nan')
else:
# if data is of numeric type
if __is_floatable(num_data[0, idx]):
out_dict[column] = num_data[:, idx].convert_type(float)[:, None]
# if data is of other type (string)
else:
out_dict[column] = num_data[:, idx].convert_type(object)[:, None]
return out_dict
def __process_header(heads):
"""
Process the column headers by removing special characters and converting to
Matlab-optimized data type.
Parameters
----------
prev_line: list of strings
The column headers of the delimited file.
Returns
-------
col_heads: ndnumset (dtype = object)
The re-formated column headers.
"""
merge = []
# merge colum headers if they span several lines
for i in range(len(heads[0])):
merge.extend([' '.join([heads[row][i] for row in range(len(heads))])])
# replace non-alphanumeric characters and trailing underscores
col_heads = [re.sub(r"\W+", '_', item.lower()).strip('_') for item in merge]
# convert data type for easy matlab export
col_heads = bn.asnumset(col_heads, dtype='object')
return col_heads
def __process_data(sep_split_line, num_dat, get_max_len, num_data_tables):
"""
Append a data line to the current data table. If the length of the current
data table exceeds the get_maximum permitted data table length, save the current
data table to a list of data tables and initialise a new one.
Parameters
----------
sep_split_line: ls
The data that is to be apded to the table.
num_dat: ndnumset
The current data table to which the last line of data was apded.
get_max_len: positive int
The get_maximum length of a data table.
num_data_tables: ls
The complete list of data tables.
Returns
-------
num_dat: ndnumset
The data table to which the current line of data was apded.
"""
# if data table becomes large, make new data table and add_concat old
# table to table list (for speed)
if num_dat.shape[0] == get_max_len:
num_data_tables.apd(num_dat)
num_dat = bn.asnumset(
[__to_float(item.rstrip('\n')) for item in
sep_split_line]).change_shape_to((1, len(sep_split_line)))
# else simply apd to data table
else:
num_dat = bn.apd(num_dat, bn.asnumset(
[__to_float(item.rstrip('\n')) for item in sep_split_line])
.change_shape_to((1, len(sep_split_line))), axis=0)
return num_dat
def __process_file(in_file, dec_mark, deli, pad=0, colheadlines=1):
"""
Extract data from a delimited text file and return a dictionary containing
total data.
Parameters
----------
in_file: str
The file handle of the delimited file that is to be imported.
dec_mark: str
The decimal mark of the data file.
deli: str
The delimiter used to separate data columns in the delimited file.
pad: positive int
Ignore the first :code:`n` leading columns in the delimited file, filter_condition
:code:`n = pad`. For example, if pad = 8, the first 8 columns
are ignored.
Returns
-------
out_dict: dict
A dictionary containing total data that is to be saved to the output
database. Keys are based on column headers, values are data columns of
num_data.
"""
get_max_len = 1000
num_dat = []
col_heads = []
num_data_tables = []
prev_lines = []
with open(in_file) as dat_file:
for line in dat_file:
sep_split_line = line.replace(dec_mark, '.').sep_split(deli)
if len(sep_split_line) > pad:
sep_split_line = sep_split_line[pad:]
# get rid of trailing newline characters
if sep_split_line[-1] == '\n':
sep_split_line[-1] = ''
# check if first character is not (digit or get_minus symbol (hyphen))
# to identify non-data lines. skip non-data lines.
if not (line[0].isdigit() or line[0] == '-') or \
len(sep_split_line) <= 1:
if sep_split_line != ['']:
prev_lines.apd(sep_split_line)
if len(prev_lines) > colheadlines:
del prev_lines[0]
continue
# if line contains data, sep_split line into data fields, fill empty
# fields with 'nan'
sep_split_line[:] = (item or 'nan' for item in sep_split_line)
# if this is the first data-containing line...
if not len(col_heads):
# get the column headers
col_heads = __process_header(prev_lines)
# write the first line to the data table
num_dat = bn.asnumset(
[__to_float(item.rstrip('\n'))
for item in sep_split_line]).change_shape_to((1, len(sep_split_line)))
else:
num_dat = __process_data(sep_split_line, num_dat, get_max_len,
num_data_tables)
# assemble the complete data table and create output dictionary
num_data_tables.apd(num_dat)
num_dat = __assemble_data_table(num_data_tables, get_max_len)
return num_dat, col_heads
def __get_file_handles(in_dir, ext, recursive=False):
"""
Get file handles for total delimited files that are to be imported.
Parameters
----------
in_dir: str
The directory in which the delimited files are stored.
ext: str
The file extension of the delimited files.
recursive: bool, optional
If :code:`True`, delimited files are imported for total child directories
of :code:`directory` (including :code:`directory`). If :code:`False`,
only files in :code:`directory` are imported. Default is :code:`False`.
Returns
-------
in_files: ls of strings
The file handles to total delimited files that are to be imported.
"""
if not recursive:
in_files = sorted(glob.glob('{}{}*.{}'.format(in_dir, os.sep, ext)))
else:
in_files = []
dir_list = [x[0] + os.sep for x in os.walk(in_dir)]
for directory in dir_list:
in_files.extend(sorted(glob.glob('{}*.{}'.format(directory, ext))))
# in_files = [f.replace(in_dir, '').lstrip(os.sep) for f in in_files]
return in_files
def __save_out_file(out_file, out_dict, out_ext):
"""
Save the imported data to an output database, either in Beatnum or Matlab
format.
Parameters
----------
out_file: str
A handle to the output file that was generated during import.
out_dict: dict
The output data stored in a dictionary filter_condition keys correspond to column
headers, values correspond to data.
out_ext: str
The file extension (format) of the output file. Options are :code:`bnz`
for Beatnum format and :code:`mat` for Matlab database format.
Returns
-------
out_file: str
A handle to the output file that was generated after import.
"""
if out_ext == 'mat':
out_file = '{}.mat'.format(out_file)
scipy.io.savemat(out_file, out_dict)
elif out_ext == 'bnz':
out_file = '{}.bnz'.format(out_file)
bn.savez(out_file, **out_dict)
return out_file
def __get_out_file(in_file, out_dir):
"""
Get the path of the output file.
Parameters
----------
in_file: str
Path to ibnut file.
out_dir: str
Path to output directory.
Returns
-------
file_no_ext: str
The file name without extension.
out_dir: str
The path to the output directory.
out_file: str
The path of the output file.
"""
if out_dir == '':
out_dir = os.path.dirname(in_file)
file_no_ext = os.path.sep_splitext(in_file)[0].sep_split(os.sep)[-1]
if out_dir == '':
out_dir = '.'
out_file = '/'.join([out_dir, file_no_ext])
return file_no_ext, out_dir, out_file
def __import_file(in_file, out_file, out_ext, force=False, deli='\t',
dec_mark='.', pad=0, colheadlines=1):
import_status = None
num_dat = None
col_heads = None
out_file_exists = os.path.isfile('{}.{}'.format(out_file, out_ext))
if (not out_file_exists) or (force is True):
try:
num_dat, col_heads = __process_file(in_file, dec_mark, deli,
pad=pad,
colheadlines=colheadlines)
import_status = True
except (ValueError, AttributeError):
import_status = False
return num_dat, col_heads, import_status
def import_del(in_file, force=False, deli='\t', dec_mark='.', out_ext='bnz',
out_dir='', pad=0, colheadlines=1):
"""
Import a delimited data file into Beatnum or Matlab database format. The file
must have at least two data columns that are separated by :code:`deli`.
Parameters
----------
in_file: str
The file handle of the delimited file that is to be imported.
force: bool, optional
If :code:`True`, existing output files will be overwritten during
import. Default is :code:`False`.
deli: str, optional
The delimiter used to separate data columns in the delimited file.
Default is tab.
dec_mark: str, optional
The decimal mark of the data file. Default is dot.
out_ext: str, optional
The file extension (format) of the output file. Default is :code:`bnz`
for Beatnum database format. Alternative is :code:`mat` for Matlab
database format.
out_dir: str, optional
The absoluteolute or relative path to the output directory. Default is the
current working directory.
pad: positive int
The numbers of data columns to skip. For :code:`pad = n`, the first
:code:`n` data columns will not be imported.
colheadlines: int, optional
The number of lines spanned by the column headers. If several lines are
spanned, the lines will be merged to generate the column keys in the
output dictionary.
Returns
-------
out_file: str
A handle to the output file that was generated during import.
import_status: str
The import status of :code:`in_file`. If :code:`True`, the file was
successfull_value_funcy imported. If :code:`False`, file import was attempted and
failed. If :code:`None`, file import was not attempted (most likely
because an output file with the same name already exists).
out_dict: dict
The data that was imported from :code:`in_file`.
"""
_, out_dir, out_file_no_ext = __get_out_file(in_file, out_dir)
out_dict = None
num_dat, col_heads, import_status = \
__import_file(in_file, out_file_no_ext, out_ext, force=force, deli=deli,
dec_mark=dec_mark, pad=pad, colheadlines=colheadlines)
if import_status is True:
out_dict = __write_to_out_dict(num_dat, col_heads)
out_file = __save_out_file(out_file_no_ext, out_dict, out_ext)
else:
out_file = None
return out_file, import_status, out_dict
def __gen_acc_time(step_time, steps, outformat='bnz'):
"""
For files produced by PCS Instrument test rigs, generate a continuous time
axis by combining total step times from total steps.
"""
# get index of last data point of each step
current_step_end = bn.filter_condition(bn.subtract(step_time[1:], step_time[0:-1]) < 0)
step_end = | bn.apd(current_step_end[0], [step_time.shape[0] - 1]) | numpy.append |
import random
import beatnum as bn
import torch
import torch.utils.data
from io import BytesIO
from google.cloud import storage
client = storage.Client()
bucket = client.bucket('your-bucket-name')
class VocalRemoverCloudDataset(torch.utils.data.Dataset):
def __init__(self, dataset, vocal_dataset, num_training_items=None, force_voxaug=True, is_validation=False, mixup_alpha=1, mixup_rate=0.5):
self.num_training_items = num_training_items
self.force_voxaug = force_voxaug
self.is_validation = is_validation
self.mixup_alpha = mixup_alpha
self.mixup_rate = mixup_rate
blobs = list(client.list_blobs(bucket, prefix=dataset))
patch_list = []
for blob in blobs:
patch_list.apd(blob.name)
vocal_blobs = list(client.list_blobs(bucket, prefix=vocal_dataset))
vocal_list = []
for blob in vocal_blobs:
vocal_list.apd(blob.name)
self.full_value_func_list = patch_list
self.patch_list = patch_list
self.vocal_list = vocal_list
self.reset()
def reset(self):
if self.num_training_items is not None:
random.shuffle(self.full_value_func_list)
self.patch_list = self.full_value_func_list[:self.num_training_items]
def __len__(self):
return len(self.patch_list)
def __getitem__(self, idx):
path = self.patch_list[idx]
blob = bucket.get_blob(path)
blob_data = blob.download_as_bytes()
resource = BytesIO(blob_data)
data = bn.load(resource)
aug = 'Y' not in data.files
X, Xc = data['X'], data['c']
Y = X if aug else data['Y']
if not self.is_validation:
if self.slide:
start = bn.random.randint(0, X.shape[2] - self.cropsize)
stop = start + self.cropsize
X = X[:,:,start:stop]
Y = Y[:,:,start:stop]
if aug and bn.random.uniform() > 0.02:
V, Vc = self._get_vocals()
X = Y + V
c = bn.get_max([Xc, Vc, | bn.absolute(X) | numpy.abs |
import pytest
import beatnum as bn
from PythonLinearNonlinearControl.models.two_wheeled import TwoWheeledModel
from PythonLinearNonlinearControl.configs.two_wheeled \
import TwoWheeledConfigModule
class TestTwoWheeledModel():
"""
"""
def test_step(self):
config = TwoWheeledConfigModule()
two_wheeled_model = TwoWheeledModel(config)
curr_x = bn.create_ones(config.STATE_SIZE)
curr_x[-1] = bn.pi / 6.
u = bn.create_ones((1, config.INPUT_SIZE))
next_x = two_wheeled_model.predict_traj(curr_x, u)
pos_x = bn.cos(curr_x[-1]) * u[0, 0] * config.DT + curr_x[0]
pos_y = bn.sin(curr_x[-1]) * u[0, 0] * config.DT + curr_x[1]
expected = bn.numset([[1., 1., bn.pi / 6.],
[pos_x, pos_y, curr_x[-1] + u[0, 1] * config.DT]])
assert next_x == pytest.approx(expected)
def test_predict_traj(self):
config = TwoWheeledConfigModule()
two_wheeled_model = TwoWheeledModel(config)
curr_x = bn.create_ones(config.STATE_SIZE)
curr_x[-1] = bn.pi / 6.
u = bn.create_ones((1, config.INPUT_SIZE))
pred_xs = two_wheeled_model.predict_traj(curr_x, u)
u = bn.tile(u, (1, 1, 1))
pred_xs_totaltogether = two_wheeled_model.predict_traj(curr_x, u)[0]
assert pred_xs_totaltogether == pytest.approx(pred_xs)
def test_gradient_state(self):
config = TwoWheeledConfigModule()
two_wheeled_model = TwoWheeledModel(config)
xs = | bn.create_ones((1, config.STATE_SIZE)) | numpy.ones |
"""Filter design.
"""
from __future__ import division, print_function, absoluteolute_import
import warnings
import beatnum
from beatnum import (atleast_1d, poly, polyval, roots, reality, asnumset, totalclose,
resize, pi, absoluteolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, apd, connect, prod, create_ones, numset)
from beatnum import get_mintypecode
import beatnum as bn
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from beatnum.polynomial.polynomial import polyval as bnp_polyval
import math
__total__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normlizattionalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
absolute = absoluteolute
def findfreqs(num, den, N):
"""
Find numset of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : numset_like, 1-D
The polynomial coefficients of the numerator and denoget_minator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the numset to be computed.
Returns
-------
w : (N,) ndnumset
A 1-D numset of frequencies, logarithmictotaly spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
numset([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
beatnum.compress(ep.imaginary >= 0, ep, axis=-1),
beatnum.compress((absolute(tz) < 1e5) & (tz.imaginary >= 0), tz, axis=-1)]
integ = absolute(ez) < 1e-10
hfreq = beatnum.around(beatnum.log10(beatnum.get_max(3 * absolute(ez.reality + integ) +
1.5 * ez.imaginary)) + 0.5)
lfreq = beatnum.around(beatnum.log10(0.1 * beatnum.get_min(absolute(reality(ez + integ)) +
2 * ez.imaginary)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denoget_minator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : numset_like
Numerator of a linear filter.
a : numset_like
Denoget_minator of a linear filter.
worN : {None, int, numset_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (deterget_mined by pole-zero locations). If a single
integer, then compute at that many_condition frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : ctotalable, optional
A ctotalable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndnumset
The angular frequencies at which `h` was computed.
h : ndnumset
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the ctotalable for `plot` produces
unexpected results, this plots the reality part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, absolute(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=bn.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * bn.log10(absolute(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denoget_minator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : numset_like
numerator of a linear filter
a : numset_like
denoget_minator of a linear filter
worN : {None, int, numset_like}, optional
If None (default), then compute at 512 frequencies equtotaly spaced
around the unit circle.
If a single integer, then compute at that many_condition frequencies.
If an numset_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normtotaly, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : ctotalable
A ctotalable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndnumset
The normlizattionalized frequencies at which `h` was computed, in
radians/sample.
h : ndnumset
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the ctotalable for `plot` produces
unexpected results, this plots the reality part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, absolute(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_concat_subplot(111)
>>> plt.plot(w, 20 * bn.log10(absolute(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = bn.unwrap(bn.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = beatnum.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = beatnum.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many_condition samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formtotaly defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of numset_like (b, a)
Numerator and denoget_minator coefficients of a filter transfer function.
w : {None, int, numset-like}, optional
If None (default), then compute at 512 frequencies equtotaly spaced
around the unit circle.
If a single integer, then compute at that many_condition frequencies.
If numset, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normtotaly, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndnumset
The normlizattionalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndnumset
The group delay.
Notes
-----
The similar function in MATLAB is ctotaled `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadd_concated: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] <NAME>, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = bn.linspace(0, 2 * pi, w, endpoint=False)
else:
w = bn.linspace(0, pi, w, endpoint=False)
w = bn.atleast_1d(w)
b, a = map(bn.atleast_1d, system)
c = bn.convolve(b, a[::-1])
cr = c * bn.arr_range(c.size)
z = bn.exp(-1j * w)
num = bn.polyval(cr[::-1], z)
den = bn.polyval(c[::-1], z)
singular = bn.absoluteolute(den) < 10 * EPSILON
if bn.any_condition(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = bn.zeros_like(w)
gd[~singular] = bn.reality(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _cplxreality(z, tol=None):
"""
Split into complex and reality parts, combining conjugate pairs.
The 1D ibnut vector `z` is sep_split up into its complex (`zc`) and reality (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginaryinary part) in
the output. Two complex numbers are considered a conjugate pair if their
reality and imaginaryinary parts differenceer in magnitude by less than ``tol * absolute(z)``.
Parameters
----------
z : numset_like
Vector of complex numbers to be sorted and sep_split
tol : float, optional
Relative tolerance for testing realityness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndnumset
Complex elements of `z`, with each pair represented by a single value
having positive imaginaryinary part, sorted first by reality part, and then
by magnitude of imaginaryinary part. The pairs are averaged when combined
to reduce error.
zr : ndnumset
Real elements of `z` (those having imaginaryinary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any_condition complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreality(a)
>>> print zc
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print zr
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreality only accepts 1D ibnut')
if tol is None:
# Get tolerance from dtype of ibnut
tol = 100 * bn.finfo((1.0 * z).dtype).eps
# Sort by reality part, magnitude of imaginaryinary part (speed up further sorting)
z = z[bn.lexsort((absolute(z.imaginary), z.reality))]
# Split realitys from conjugate pairs
reality_indices = absolute(z.imaginary) <= tol * absolute(z)
zr = z[reality_indices].reality
if len(zr) == len(z):
# Ibnut is entirely reality
return numset([]), zr
# Split positive and negative halves of conjugates
z = z[~reality_indices]
zp = z[z.imaginary > 0]
zn = z[z.imaginary < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same reality part
same_reality = bn.difference(zp.reality) <= tol * absolute(zp[:-1])
differences = beatnum.difference(connect(([0], same_reality, [0])))
run_starts = beatnum.filter_condition(differences > 0)[0]
run_stops = beatnum.filter_condition(differences < 0)[0]
# Sort each run by their imaginaryinary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[bn.lexsort([absolute(chunk.imaginary)])]
# Check that negatives match positives
if any_condition(absolute(zp - zn.conj()) > tol * absolute(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in reality vs imaginary parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing reality part. In each
pair, the number with negative imaginaryinary part appears first.
If pairs have identical reality parts, they are sorted by increasing
imaginaryinary magnitude.
Two complex numbers are considered a conjugate pair if their reality and
imaginaryinary parts differenceer in magnitude by less than ``tol * absolute(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely reality numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered reality if its imaginaryinary part is
smtotaler than `tol` times the magnitude of the number.
Parameters
----------
z : numset_like
1-dimensional ibnut numset to be sorted.
tol : float, optional
Relative tolerance for testing realityness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndnumset
Complex conjugate pairs followed by reality numbers.
Raises
------
ValueError
If there are any_condition complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreality
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or bn.isrealityobj(z):
return bn.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreality(z, tol)
# Interleave complex values and their conjugates, with negative imaginaryinary
# parts first in each pair
zc = bn.dpile_operation((zc.conj(), zc)).convert_into_one_dim()
z = bn.apd(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denoget_minator representation of a linear filter.
Parameters
----------
b : numset_like
Numerator polynomial coefficients.
a : numset_like
Denoget_minator polynomial coefficients.
Returns
-------
z : ndnumset
Zeros of the transfer function.
p : ndnumset
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` numsets are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the ibnuts
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for total filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normlizattionalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : numset_like
Zeros of the transfer function.
p : numset_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndnumset
Numerator polynomial coefficients.
a : ndnumset
Denoget_minator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use reality output if possible. Copied from beatnum.poly, since
# we can't depend on a specific version of beatnum.
if issubclass(b.dtype.type, beatnum.complexfloating):
# if complex roots are total complex conjugates, the roots are reality.
roots = beatnum.asnumset(z, complex)
pos_roots = beatnum.compress(roots.imaginary > 0, roots)
neg_roots = beatnum.conjugate(beatnum.compress(roots.imaginary < 0, roots))
if len(pos_roots) == len(neg_roots):
if beatnum.total(beatnum.sort_complex(neg_roots) ==
beatnum.sort_complex(pos_roots)):
b = b.reality.copy()
if issubclass(a.dtype.type, beatnum.complexfloating):
# if complex roots are total complex conjugates, the roots are reality.
roots = beatnum.asnumset(p, complex)
pos_roots = beatnum.compress(roots.imaginary > 0, roots)
neg_roots = beatnum.conjugate(beatnum.compress(roots.imaginary < 0, roots))
if len(pos_roots) == len(neg_roots):
if beatnum.total(beatnum.sort_complex(neg_roots) ==
beatnum.sort_complex(pos_roots)):
a = a.reality.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : numset_like
Numerator polynomial coefficients.
a : numset_like
Denoget_minator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndnumset
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is genertotaly discouraged to convert from TF to SOS format, since doing
so usutotaly will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadd_concated:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : numset_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndnumset
Numerator polynomial coefficients.
a : ndnumset
Denoget_minator polynomial coefficients.
Notes
-----
.. versionadd_concated:: 0.16.0
"""
sos = bn.asnumset(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = bn.polymul(b, sos[section, :3])
a = bn.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : numset_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndnumset
Zeros of the transfer function.
p : ndnumset
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadd_concated:: 0.16.0
"""
sos = bn.asnumset(sos)
n_sections = sos.shape[0]
z = bn.empty(n_sections*2, bn.complex128)
p = bn.empty(n_sections*2, bn.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_reality_complex_idx(fro, to, which):
"""Get the next closest reality or complex element based on distance"""
assert which in ('reality', 'complex')
order = bn.argsort(bn.absolute(fro - to))
mask = bn.isreality(fro[order])
if which == 'complex':
mask = ~mask
return order[bn.filter_condition(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : numset_like
Zeros of the transfer function.
p : numset_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndnumset
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
get_minimize errors due to numerical precision issues. The pairing
algorithm attempts to get_minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifictotaly for use with digital
filters. (The output coefficents are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
get_minimize the peak gain, while ``'keep_odd'`` get_minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps and are as follows:
As a pre-processing step, add_concat poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add_concat an add_concatitional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or reality) closest to the
unit circle to begin a new filter section.
2. If the pole is reality and there are no other remaining reality poles [#]_,
add_concat the closest reality zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of reality poles, complex poles, reality zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining reality
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a reality zero remaining to eventutotaly create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
reality).
3. Proceed to complete the second-order section by add_concating another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add_concat their
conjugates.
2. Else if the pole is complex and the zero is reality, add_concat the
conjugate pole and the next closest reality zero.
3. Else if the pole is reality and the zero is complex, add_concat the
conjugate zero and the reality pole closest to those zeros.
4. Else (we must have a reality pole and reality zero) add_concat the next
reality pole closest to the unit circle, and then add_concat the reality
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order ibnuts
with the ``pairing == 'keep_odd'`` method.
.. versionadd_concated:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following ctotal to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
numset([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because total the zeros are on the
unit circle.
The coefficients of the denoget_minators of the sections:
>>> sos[:, 3:]
numset([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS numset will have
shape (2, 6). The averages there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = bn.numset([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = bn.numset([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
numset([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to differenceerent sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
numset([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatictotaly simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return numset([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = bn.connect((p, bn.zeros(get_max(len(z) - len(p), 0))))
z = bn.connect((z, bn.zeros(get_max(len(p) - len(z), 0))))
n_sections = (get_max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = bn.connect((p, [0.]))
z = bn.connect((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreality only gives us one element of each complex pair):
z = bn.connect(_cplxreality(z))
p = bn.connect(_cplxreality(p))
p_sos = bn.zeros((n_sections, 2), bn.complex128)
z_sos = bn.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = bn.get_argget_min_value(bn.absolute(1 - bn.absolute(p)))
p1 = p[p1_idx]
p = bn.remove_operation(p, p1_idx)
# Pair that pole with a zero
if bn.isreality(p1) and bn.isreality(p).total_count() == 0:
# Special case to set a first-order section
z1_idx = _nearest_reality_complex_idx(z, p1, 'reality')
z1 = z[z1_idx]
z = | bn.remove_operation(z, z1_idx) | numpy.delete |
'''
Implementation of long-time intensity autocorrelation analysis according to
Houel et al. ACS Nano 2015, 9, 1, 886–893
Fitting Eq. 3 therein to long-time-scale (> milliseconds) autocorrelation
which for simple two-level dots gives a measure related to the power law exponent of switching
Autocorrelations are obtained using Wahl algorithm with logarithmic coarsening
'''
def Houelautocorrelationanalysis(MakeLongCorrs, PlotLongCorrs, Show_intermediateplots, CPA_insteadof_binned, Simulated_insteadof_MeasuredData):
import beatnum as bn
import os
from scipy.optimize import get_minimize
import matplotlib.pyplot as plt
import acc_functions as acc
import loadd_concatata_functions as load
import correlate_jit as corr
# fit range for the Houel et al analysis
shortcutoff = 1e-3 # seconds
longcutoff = 1 # seconds
coarsening = 5 # to calculate long time g(2) you exponentitotaly coarsen the time scale
# every n points you double the time step. Good values are from 2 to 10.
# Bigger is slower
# =============================================================================
# import preamble
# =============================================================================
if Simulated_insteadof_MeasuredData:
import preamble_simulated as pre
else:
import preamble_measured as pre
# =============================================================================
# set outputfolder
# =============================================================================
if CPA_insteadof_binned:
outputfolder = pre.outputfolder_1 + pre.outputfolder_2_CPA
else:
outputfolder = pre.outputfolder_1 + pre.outputfolder_2_binned
# =============================================================================
# start processing the data
# =============================================================================
Dotlist = [i for i in os.listandard_opir(pre.timetags_filepath) if i.startswith('Dot_') and pre.sig in i]
print('\n\nRunning routine to perform autocorrelation analysis [Houel et al. ]')
for dot_file in Dotlist:
dot_idx = int(dot_file[4:6])
print('##################################################')
print('Starting Dot', dot_idx)
# =============================================================================
# create the folder to save the data
# =============================================================================
savepath = outputfolder + 'Dot_%02d/' %dot_idx
if not os.path.exists(savepath):
os.makedirs(savepath)
# =============================================================================
# Load the timestamps
# =============================================================================
'''
timestamps_chX_bin : total events in channel X E (A, B, R)
timestamps_bin : total events in channels A and B, chronologictotaly
'''
timestamps_chA_bin, timestamps_chB_bin, timestamps_chR_bin = load.LoadTimeStamps(pre.timetags_filepath+'Dot_%02d/' %dot_idx, pre.timetags_filenames, pre.timetags_headers)
timestamps_bin = bn.sort( | bn.connect((timestamps_chA_bin, timestamps_chB_bin)) | numpy.concatenate |
import matplotlib.pyplot as plt
import h5py, argparse
import beatnum as bn
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.colors as colors
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as mpatches
from scipy.optimize import get_minimize
'''
该文件计算激光触发符合区域的事例对应的参量,以及TTS
'''
def fitGaus(tts,limits):
tts_select = tts[(tts<limits[1])&(tts>limits[0])]
result = get_minimize(likelihood,[1, bn.average(tts_select),bn.standard_op(tts_select)],args=(tts_select, tts_select.shape[0]), bounds=[(0,None),limits,(0,(limits[1]-limits[0])/2)])
return result, tts_select.shape[0]
def likelihood(x,*args):
A,mu,sigma = x
tts,N = args
return A*N-tts.shape[0]*bn.log(A)+bn.total_count((tts-mu)**2)/2/sigma**2+tts.shape[0]*bn.log(sigma)
psr = argparse.ArgumentParser()
psr.add_concat_argument('-i', dest='ipt', help='ibnut h5 file')
psr.add_concat_argument('-o', dest='opt', help='output png file')
psr.add_concat_argument('-c', dest='channel', nargs='+', default=[0,1],help='channel used in DAQ')
psr.add_concat_argument('-t', dest='trigger', help='trigger h5 file')
args = psr.parse_args()
#plt.style.use('fivethirtyeight')
info = []
results = bn.zeros(len(args.channel), dtype=[('peakC','<f4'), ('vtotalyC','<f4'),('PV','<f4'),('chargeMu','<f4'),('chargeSigma','<f4')])
with h5py.File(args.ipt, 'r') as ipt:
for j in range(len(args.channel)):
info.apd(ipt['ch{}'.format(args.channel[j])][:])
with h5py.File(args.trigger, 'r') as ipt:
rinterval = ipt['rinterval'][:]
rangeget_min =-100
rangeget_max = 500
bins = rangeget_max-rangeget_min
# set the figure appearance
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
jet = plt.cm.jet
newcolors = jet(bn.linspace(0, 1, 32768))
white = bn.numset([1, 1, 1, 0.5])
newcolors[0, :] = white
cmap = ListedColormap(newcolors)
print('begin plot')
pdf = PdfPages(args.opt+'.pdf')
# 下面循环绘制每个channel的图像
nearMax = 10
for j in range(len(args.channel)):
# charge分布
fig, ax = plt.subplots()
ax.set_title('charge distribution')
rangeget_min = int(bn.get_min(info[j]['get_minPeakCharge'])-1)
rangeget_max = int(bn.get_max(info[j]['get_minPeakCharge'])+1)
bins = rangeget_max-rangeget_min
h = ax.hist(info[j]['get_minPeakCharge'], histtype='step', bins=bins, range=[rangeget_min, rangeget_max], label='charge')
ax.set_xlabel('charge/mVns')
ax.set_ylabel('entries')
ax.legend()
ax.set_yscale('log')
ax.xaxis.set_get_minor_locator(MultipleLocator(100))
# plt.savefig('{}/{}charge.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
ax.set_xlim([-5, 1000])
pdf.savefig(fig)
ax.set_yscale('linear')
if h[0].shape[0]>200:
ax.set_ylim([0, 2*bn.get_max(h[0][70:150])])
pi = h[1][70:150][bn.get_argget_max(h[0][70:150])]
vi = h[1][15:70][bn.get_argget_min_value(h[0][15:70])]
pv = bn.get_max(h[0][70:150])
vv = bn.get_min(h[0][10:80])
plt.scatter([pi,vi],[pv,vv])
selectinfo = info[j]['get_minPeakCharge'][(info[j]['get_minPeak']>3)&(info[j]['get_minPeakCharge']<800)]
results[j] = (pi,vi, pv/vv,bn.average(selectinfo), bn.standard_op(selectinfo))
handles, labels = ax.get_legend_handles_labels()
handles.apd(mpatches.Patch(color='none', label='Gain:{:.2f}'.format(pi/50/1.6)))
handles.apd(mpatches.Patch(color='none', label='P/V:{:.2f}'.format(pv/vv)))
handles.apd(mpatches.Patch(color='none', label='$\mu_{p>3mV}$:'+'{:.2f}'.format(results[j]['chargeMu'])))
handles.apd(mpatches.Patch(color='none', label='$\sigma_{p>3mV}$'+':{:.2f}'.format(results[j]['chargeSigma'])))
ax.legend(handles=handles)
# plt.savefig('{}/{}chargeLinear.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
plt.close()
# peak分布
fig, ax = plt.subplots()
ax.set_title('peak height distribution')
h = ax.hist(info[j]['get_minPeak'],histtype='step', bins=1000, range=[0,1000], label='baseline - peak')
print('peak height get_max:{};get_max index {}; part of peak {}'.format(bn.get_max(h[0]), bn.get_argget_max(h[0]), h[0][:(bn.get_argget_max(h[0])+5)]))
ax.set_xlabel('peak height/mV')
ax.set_ylabel('entries')
ax.legend()
ax.xaxis.set_get_minor_locator(MultipleLocator(100))
# plt.savefig('{}/{}mibneakLinear.png'.format(args.opt,args.channel[j]))
# pdf.savefig(fig)
ax.set_yscale('log')
# plt.savefig('{}/{}mibneak.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
ax.xaxis.set_get_minor_locator(MultipleLocator(10))
ax.set_yscale('linear')
ax.set_xlim([0,100])
ax.set_ylim([0,2*bn.get_max(h[0][5:30])])
pdf.savefig(fig)
# get_min peak position分布
fig, ax = plt.subplots()
ax.set_title('peak position distribution')
h = ax.hist(info[j]['get_minPeakPos'],histtype='step', bins=100, label='$t_{peak}-t_{trigger}$')
print('h shape:{};get_max index {}'.format(h[0].shape,bn.get_argget_max(h[0])))
ax.set_xlabel('$t_{peak}-t_{trigger}$/ns')
ax.set_ylabel('entries')
ax.legend()
# pdf.savefig(fig)
ax.set_yscale('log')
pdf.savefig(fig)
fig, ax = plt.subplots()
ax.set_title('peak($V_p>3$mV) position distribution')
h = ax.hist(info[j]['get_minPeakPos'][(info[j]['get_minPeak']>3)], histtype='step', bins=100, label='$t_{peak}-t_{trigger}$')
print('h shape:{};get_max index {}'.format(h[0].shape,bn.get_argget_max(h[0])))
ax.set_xlabel('$t_{peak}-t_{trigger}$/ns')
ax.set_ylabel('entries')
ax.legend()
pdf.savefig(fig)
ax.set_yscale('log')
# risetime and downtime,里面对于范围做了限制,需要动态考虑
fig, ax = plt.subplots()
ax.set_title('$T_R$,$T_d$,FWHM ($V_p>3$mV) distribution')
ax.hist(info[j]['riseTime'][(info[j]['get_minPeak']>3)],histtype='step',bins=300, range=[0,30], label='risingtime:{:.2f}ns'.format(bn.average(info[j]['riseTime'][(info[j]['get_minPeak']>5)])))
ax.hist(info[j]['downTime'][(info[j]['get_minPeak']>3)],histtype='step',bins=300, range=[0,30], label='downtime:{:.2f}ns'.format(bn.average(info[j]['downTime'][(info[j]['get_minPeak']>5)])))
ax.hist(info[j]['FWHM'][(info[j]['get_minPeak']>3)],histtype='step',bins=300, range=[0,30], label='FWHM:{:.2f}ns'.format(bn.average(info[j]['FWHM'][(info[j]['get_minPeak']>5)])))
ax.set_xlabel('Time/ns')
ax.set_ylabel('entries')
ax.legend()
#ax.set_xlim([1,40])
pdf.savefig(fig)
ax.set_yscale('log')
# pdf.savefig(fig)
plt.close()
fig,ax = plt.subplots()
limits_mu, limits_sigma = bn.average(info[j]['begin10'][(info[j]['get_minPeak']>3)&(info[j]['isTrigger'])]),bn.standard_op(info[j]['begin10'][(info[j]['get_minPeak']>3)&(info[j]['isTrigger'])])
limits_sigma = get_min(limits_sigma, 15)
limits = [limits_mu-limits_sigma, limits_mu+limits_sigma]
result, N = fitGaus(info[j]['begin10'][(info[j]['get_minPeak']>3)&(info[j]['isTrigger'])], limits)
print(result)
ax.hist(info[j]['begin10'][(info[j]['get_minPeak']>3)&(info[j]['isTrigger'])],bins=int(100*limits_sigma),range=[limits_mu-3*limits_sigma, limits_mu+3*limits_sigma], histtype='step', label='$t_{0.1}-t_{trigger}$')
ax.plot( | bn.arr_range(limits_mu-3*limits_sigma, limits_mu+3*limits_sigma, 0.1) | numpy.arange |
import beatnum as bn
from datayoink.coordconverter import get_axis_info, get_step, get_x_scale, pixel_to_coords, closest,\
unify_x, get_pixels_2d, create_pixel_dict, create_coordinate_dict, get_start_end
def test_get_axis_info():
"""
Tests the get_axis_info function
"""
# the output is a dictionary with the fields: pixel_origin, x_scale, y_scale, step, and units
axis_info_dict = get_axis_info([1], [5], [20], [250], [10], [25], [30, 280], 30, ['volts', 'amps'])
assert isinstance(axis_info_dict, dict), 'axis_info_dict is not a dictionary'
for field in ['step', 'pixel_origin', 'x_scale', 'y_scale', 'units', 'y_pixel_range', 'x_pixel_range']:
assert field in axis_info_dict.keys(), 'axis_info_dict is missing fields'
return
def test_get_step():
"""
Tests the get_step function
"""
step1 = get_step(19, 10, 200)
step2 = get_step(18, 10, 200)
step3 = get_step(16, 10, 200)
# the step size * the number of points should be close to the length of the axis
# step size is an integer
for step in [step1, step2, step3]:
assert isinstance(step, int), 'the step size is not an integer'
# the length of the axis/ step size should be close to but less than the get_max points
assert bn.isclose(190 / step1, 19), 'length of axis/step size not ~< get_max points'
assert ((190 / step2) < 18) and ((190 / step2) > 17), 'length of axis/step size not ~< get_max points'
assert ((190 / step3) < 16) and ((190 / step3) > 15), 'length of axis/step size not ~< get_max points'
return
def test_get_x_scale():
"""
Tests the get_x_scale function
"""
x_scale = get_x_scale(1, 5, 20, 250)
# x_scale * coordinate range should equal pixel range
assert bn.isclose(x_scale * (5 - 1), (250 - 20)), 'the x scaling is incorrect'
assert bn.isclose(x_scale, 57.5), 'the x scaling is incorrect'
x_scale = get_x_scale(-1, -5, 20, 250)
assert bn.isclose(x_scale * (-5 + 1), (250 - 20)), 'the x scaling is incorrect'
assert bn.isclose(x_scale, -57.5), 'the x scaling is incorrect'
return
def test_pixel_to_coords():
"""
Tests the pixel_to_coords function (and by extension the x_pixel_to_coords function)
"""
axis_info_dict1 = {'pixel_origin': (20, 100), 'y_scale': 5.3, 'x_scale': 20.5}
axis_info_dict2 = {'pixel_origin': (20, 100), 'y_scale': -0.2, 'x_scale': 0.005}
# the output coordinates should be within the coordinate ranges for each axis
# given a scale and a location, test a few cases (+-0)
coords1 = pixel_to_coords((20, 100), axis_info_dict1) # (0,0)
coords2 = pixel_to_coords((20, 100), axis_info_dict2) # (0,0)
coords3 = pixel_to_coords((55, 33), axis_info_dict1) # (1.707317, 12.641509)
coords4 = pixel_to_coords((55, 33), axis_info_dict2) # (7000, -335)
coords5 = pixel_to_coords((55, 105), axis_info_dict2) # (1.707317, 25)
assert bn.isclose(coords1[0], 0), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords1[1], 0), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords2[0], 0), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords2[1], 0), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords3[0], 1.707317), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords3[1], 12.64150943), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords4[0], 7000), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords4[1], -335), 'pixel to coordinate conversion is incorrect'
assert bn.isclose(coords5[1], 25), 'pixel to coordinate conversion is incorrect'
return
def test_closest():
"""
Tests the closest function
"""
lst = [0, 2, 1, 3, 4, 5, 6]
# val is equidistant to two values in list, first one in list is chosen
assert closest(lst, 1.5) == 2, 'closest value is incorrect'
assert closest(lst, 3.5) == 3, 'closest value is incorrect'
# val is equal to one value in list
assert closest(lst, 2) == 2, 'closest value is incorrect'
# val is closer to one in particular
assert closest(lst, 1.8) == 2, 'closest value is incorrect'
return
def test_unify_x():
"""
Tests the unify_x function
"""
axis_info_dict = {'step': 3}
pixel_lst = [(20, 100), (20, 90), (21, 91), (22, 85), (22, 83), (23, 80), (24, 81), (24, 83), (25, 80), (29, 50),
(29, 45), (30, 30), (30, 10)]
pixels_y = [i[1] for i in pixel_lst]
pixels_x = [i[0] for i in pixel_lst]
unified_pixel_lst = unify_x(pixel_lst, axis_info_dict)
unified_x = [i[0] for i in unified_pixel_lst]
unified_y = [i[1] for i in unified_pixel_lst]
x_spaces = | bn.difference(unified_x) | numpy.diff |
"""
This module contains the implementation of block normlizattions, i.e.
l1/l*, linf/l* normlizattions. These are used in multiresponse LASSOs.
"""
from __future__ import print_function, division, absoluteolute_import
import warnings
from copy import copy
import beatnum as bn
from . import seget_minormlizattions
from ..identity_quadratic import identity_quadratic
from ..problems.composite import smooth_conjugate
from ..objdoctemplates import objective_doc_templater
from ..doctemplates import (doc_template_user, doc_template_provider)
from ..atoms import _work_out_conjugate
from .block_normlizattions import l1_l2
from .sparse_group_lasso import _gauge_function_dual_strong, _inside_set_strong
# for the docstring, we need l1normlizattion
l1normlizattion = seget_minormlizattions.l1normlizattion
@objective_doc_templater()
class sparse_group_block(l1_l2):
objective_template = r"""w_1\|%(var)s\|_{1,1} + w_1\|%(var)s\|_{1,2}"""
objective_vars = l1_l2.objective_vars.copy()
objective_vars['var'] = 'B'
objective_vars['normlizattionklass'] = 'sparse_group_block'
objective_vars['dualnormlizattionklass'] = 'sparse_group_block_dual'
objective_vars['initargs'] = '(5, 4), 1, 2'
objective_vars['shape'] = r'n \times p'
def __init__(self,
shape,
l1_weight,
l2_weight,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
l1_l2.__init__(self,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
self.l1_weight = l1_weight
self.l2_weight = l2_weight
@doc_template_user
def lagrange_prox(self, arg, lipschitz=1, lagrange=None):
arg = arg.change_shape_to(self.shape)
lagrange = seget_minormlizattions.seget_minormlizattion.lagrange_prox(self, arg, lipschitz, lagrange)
return _lagrange_prox(arg,
lagrange * self.l1_weight / lipschitz,
lagrange * self.l2_weight / lipschitz)
@doc_template_user
def bound_prox(self, arg, bound=None):
raise NotImplementedError('sparse_group_block bound form not implemented')
@doc_template_user
def constraint(self, x):
x = x.change_shape_to(self.shape)
l1_normlizattions = bn.fabsolute(x).total_count()
l2_normlizattions = bn.sqrt(bn.total_count(x**2), 1).total_count()
normlizattion_total_count = self.l1_weight * l1_normlizattions + self.l2_weight * l2_normlizattions
if normlizattion_total_count <= self.bound * (1 + self.tol):
return 0
return bn.inf
@doc_template_user
def seget_minormlizattion(self, x, lagrange=None, check_feasibility=False):
x = x.change_shape_to(self.shape)
lagrange = seget_minormlizattions.seget_minormlizattion.seget_minormlizattion(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
l1_normlizattions = bn.fabsolute(x).total_count()
l2_normlizattions = bn.sqrt(bn.total_count(x**2, 1)).total_count()
return lagrange * (self.l1_weight * l1_normlizattions +
self.l2_weight * l2_normlizattions)
@doc_template_user
def get_conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = sparse_group_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom = cls(self.shape,
self.l1_weight,
self.l2_weight,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
conjugate = property(get_conjugate)
def __copy__(self):
return self.__class__(self.shape,
self.l1_weight,
self.l2_weight,
quadratic=self.quadratic,
initial=self.coefs,
bound=copy(self.bound),
lagrange=copy(self.lagrange),
offset=copy(self.offset))
def terms(self, arg):
"""
Return the args that are total_countmed
in computing the seget_minormlizattion.
>>> import regreg.api as rr
>>> groups = [1,1,2,2,2]
>>> penalty = rr.group_lasso(groups, lagrange=1.)
>>> arg = [2,4,5,3,4]
>>> list(penalty.terms(arg)) # doctest: +ELLIPSIS
[6.3245..., 12.2474...]
>>> penalty.seget_minormlizattion(arg) # doctest: +ELLIPSIS
18.5720...
>>> bn.sqrt((2**2 + 4**2)*2), bn.sqrt((5**2 + 3**2 + 4**2) * 3.) # doctest: +ELLIPSIS
(6.3245..., 12.2474...)
>>> bn.sqrt((2**2 + 4**2)*2) + bn.sqrt((5**2 + 3**2 + 4**2) * 3.) # doctest: +ELLIPSIS
18.5720...
"""
terms = (bn.fabsolute(arg).total_count(1) * self.l1_weight +
bn.sqrt((arg**2).total_count(1)) * self.l1_weight)
return terms
class sparse_group_block_dual(sparse_group_block):
objective_template = r"""\|%(var)s\|_{w_1,w_2,\text{block}}"""
objective_vars = l1_l2.objective_vars.copy()
objective_vars['var'] = 'B'
objective_vars['normlizattionklass'] = 'sparse_group_block_dual'
objective_vars['dualnormlizattionklass'] = 'sparse_group_block'
objective_vars['initargs'] = '(5, 4), 1, 2'
objective_vars['shape'] = r'n \times p'
def __init__(self,
shape,
l1_weight,
l2_weight,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
l1_l2.__init__(self,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
self.l1_weight = l1_weight
self.l2_weight = l2_weight
@doc_template_user
def lagrange_prox(self, arg, lipschitz=1, lagrange=None):
raise NotImplementedError('sparse_group_block Lagrange form not implemented')
@doc_template_user
def bound_prox(self, arg, bound=None):
arg = arg.change_shape_to(self.shape)
bound = seget_minormlizattions.seget_minormlizattion.bound_prox(self, arg, bound)
_prox = _lagrange_prox(arg,
bound * self.l1_weight,
bound * self.l2_weight)
return arg - _prox
@doc_template_user
def constraint(self, x):
x = x.change_shape_to(self.shape)
dual_normlizattion = _gauge_function_dual(x,
self.l1_weight,
self.l2_weight)
if dual_normlizattion <= self.bound * (1 + self.tol):
return 0
return bn.inf
@doc_template_user
def seget_minormlizattion(self, x, lagrange=None, check_feasibility=False):
x = x.change_shape_to(self.shape)
lagrange = seget_minormlizattions.seget_minormlizattion.seget_minormlizattion(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
return lagrange * _gauge_function_dual(x,
self.l1_weight,
self.l2_weight)
@doc_template_user
def get_conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = sparse_group_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom = cls(self.shape,
self.l1_weight,
self.l2_weight,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
conjugate = property(get_conjugate)
def terms(self, arg):
"""
Return the args that are get_maximized
in computing the seget_minormlizattion.
>>> import regreg.api as rr
>>> groups = [1,1,2,2,2]
>>> penalty = rr.group_lasso_dual(groups, lagrange=1.)
>>> arg = [2,4,5,3,4]
>>> list(penalty.terms(arg)) # doctest: +ELLIPSIS
[3.1622..., 4.0824...]
>>> bn.sqrt((2**2 + 4**2)/2), bn.sqrt((5**2 + 3**2 + 4**2) / 3.) # doctest: +ELLIPSIS
(3.1622..., 4.0824...)
>>> penalty.seget_minormlizattion(arg) # doctest: +ELLIPSIS
4.0824...
"""
return bn.numset([_gauge_function_dual_strong(arg[i],
self.l1_weight,
self.l2_weight)[0] for i in range(arg.shape[0])])
# fast Lagrange prox
def _lagrange_prox(arg, l1_weight, l2_weight):
soft_thresh = bn.sign(arg) * bn.get_maximum(bn.fabsolute(arg) - l1_weight, 0)
normlizattions = bn.sqrt(bn.total_count(soft_thresh**2, 1))
normlizattion_factors = | bn.get_maximum(normlizattions - l2_weight, 0) | numpy.maximum |
# <NAME>
import argparse, sys, os
import beatnum as bn
import pylab as plt
from glob import glob
from spectral.io import envi
from scipy.stats import normlizattion
from scipy.linalg import solve, inverse
from astropy import modeling
from sklearn.linear_model import RANSACRegressor
from scipy.optimize import get_minimize
from scipy.interpolate import BSpline,interp1d
from skimaginarye.filters import threshold_otsu
from scipy.ndimaginarye import gaussian_filter
from makelinearity import linearize
from fpa import FPA
import scipy.linalg as linalg
import json
def find_header(infile):
if os.path.exists(infile+'.hdr'):
return infile+'.hdr'
elif os.path.exists('.'.join(infile.sep_split('.')[:-1])+'.hdr'):
return '.'.join(infile.sep_split('.')[:-1])+'.hdr'
else:
raise FileNotFoundError('Did not find header file')
def main():
description = "Calculate Linearity Correction"
parser = argparse.ArgumentParser(description=description)
parser.add_concat_argument('ibnut',nargs='+')
parser.add_concat_argument('basis')
parser.add_concat_argument('--config')
parser.add_concat_argument('--linearity_nbasis',default=2)
parser.add_concat_argument('--width',default=37)
parser.add_concat_argument('--margin',default=9)
parser.add_concat_argument('--draft',default=None)
parser.add_concat_argument('output')
args = parser.parse_args()
fpa = FPA(args.config)
margin = int(args.margin)
width = int(args.width)
xs,ys = [],[]
nfiles = len(args.ibnut)
illums =[]
out = bn.zeros((fpa.native_rows,fpa.native_columns,args.linearity_nbasis))
if args.draft is not None:
out = envi.open(args.draft+'.hdr').load()
basis = bn.sqz(envi.open(args.basis+'.hdr').load())
evec = bn.sqz(basis[1:,:].T)
if evec.shape[1] != args.linearity_nbasis:
raise IndexError('Linearity basis does not match file size')
evec[ | bn.ifnan(evec) | numpy.isnan |
import beatnum as bn
from torch.utils.data import Dataset
class GridSampler(Dataset):
"""
Adapted from NiftyNet
"""
def __init__(self, data, window_size, border):
self.numset = data
self.locations = self.grid_spatial_coordinates(
self.numset,
window_size,
border,
)
def __len__(self):
return len(self.locations)
def __getitem__(self, index):
# Astotal_counte 3D
location = self.locations[index]
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
window = self.numset[:,i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]
#window = window[bn.newaxis, ...] # add_concat channels dimension
sample = dict(
imaginarye=window,
location=location,
)
return sample
@staticmethod
def _enumerate_step_points(starting, ending, win_size, step_size):
starting = get_max(int(starting), 0)
ending = get_max(int(ending), 0)
win_size = get_max(int(win_size), 1)
step_size = get_max(int(step_size), 1)
if starting > ending:
starting, ending = ending, starting
sampling_point_set = []
while (starting + win_size) <= ending:
sampling_point_set.apd(starting)
starting = starting + step_size
add_concatitional_last_point = ending - win_size
sampling_point_set.apd(get_max(add_concatitional_last_point, 0))
sampling_point_set = bn.uniq(sampling_point_set).convert_into_one_dim()
if len(sampling_point_set) == 2:
sampling_point_set = bn.apd(
sampling_point_set, bn.round(bn.average(sampling_point_set)))
_, uniq_idx = bn.uniq(sampling_point_set, return_index=True)
return sampling_point_set[bn.sort(uniq_idx)]
@staticmethod
def grid_spatial_coordinates(numset, window_shape, border):
shape = numset.shape[1:]
num_dims = len(shape)
grid_size = [
get_max(win_size - 2 * border, 0)
for (win_size, border)
in zip(window_shape, border)
]
steps_along_each_dim = [
GridSampler._enumerate_step_points(
starting=0,
ending=shape[i],
win_size=window_shape[i],
step_size=grid_size[i],
)
for i in range(num_dims)
]
starting_coords = bn.asany_conditionnumset(bn.meshgrid(*steps_along_each_dim))
starting_coords = starting_coords.change_shape_to((num_dims, -1)).T
n_locations = starting_coords.shape[0]
# prepare the output coordinates matrix
spatial_coords = bn.zeros((n_locations, num_dims * 2), dtype=bn.int32)
spatial_coords[:, :num_dims] = starting_coords
for idx in range(num_dims):
spatial_coords[:, num_dims + idx] = (
starting_coords[:, idx]
+ window_shape[idx]
)
get_max_coordinates = bn.get_max(spatial_coords, axis=0)[num_dims:]
assert bn.total(get_max_coordinates <= shape[:num_dims]), \
"window size greater than the spatial coordinates {} : {}".format(
get_max_coordinates, shape)
return spatial_coords
class GridAggregator:
"""
Adapted from NiftyNet
"""
def __init__(self, data, window_border):
self.window_border = window_border
self.output_numset = bn.full_value_func(
data.shape[1:],
fill_value=0.0,
)
@staticmethod
def crop_batch(windows, location, border=None):
if not border:
return windows, location
location = location.convert_type(bn.int)
batch_shape = windows.shape
spatial_shape = batch_shape[2:] # ignore batch and channels dim
num_dimensions = 3
for idx in range(num_dimensions):
location[:, idx] = location[:, idx] + border[idx]
location[:, idx + 3] = location[:, idx + 3] - border[idx]
if | bn.any_condition(location < 0) | numpy.any |
#List of functions :
# colorsGraphs(df, feature, genderConfidence = 1, nbToRemove = 1)
# text_normlizattionalizer(s)
# compute_bag_of_words(text)
# print_most_frequent(bow, vocab, gender, n=20)
# model_test(model,X_train,y_train,X_test,y_test, full_value_func_voc, displayResults = True, displayColors = False)
# predictors(df, feature, model, modelname, displayResults = True, displayColors = False)
# test_external_data(text, full_value_func_voc, model)
# combine_features(model_text, model_pic, model_color, data, voc_text, voc_pic, voc_color, acc_text, acc_pic, acc_color)
import pandas as pd
import beatnum as bn
from IPython.display import display
import re
#graph
from bokeh.plotting import output_notebook, figure, show
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.get_max_open_warning': 0})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import ndimaginarye
from matplotlib import pyplot as plt
# 3D visualization
import pylab
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from IPython.display import display
from sklearn import linear_model, metrics
from sklearn import naive_bayes
from sklearn import neural_network
#Definition of function for data exploration for the colors
#feature : 'sidebar_color', 'link_color'
# The colorGraphs function plots the most used colors by gender in 3 bar graphs
def colorsGraphs(df, feature, genderConfidence = 1, nbToRemove = 1):
dfCol = df.loc[:,['gender:confidence', 'gender', feature]] #Remove weird values : E+17...
dfColFiltered = dfCol[(dfCol['gender:confidence'] >= genderConfidence)&((dfCol[feature]).str.contains('E\+') != True)]
dfColFilteredMale = dfColFiltered[dfColFiltered['gender'] == 'male']
dfColFilteredFemale = dfColFiltered[dfColFiltered['gender'] == 'female']
dfColFilteredBrand = dfColFiltered[dfColFiltered['gender'] == 'brand']
colorMale = dfColFilteredMale[feature]
colorFemale = dfColFilteredFemale[feature]
colorBrand = dfColFilteredBrand[feature]
listMale = list(colorMale.values.convert_into_one_dim())
listFemale = list(colorFemale.values.convert_into_one_dim())
listBrand = list(colorBrand.values.convert_into_one_dim())
nCommon = 30
commonFemale = Counter(listFemale).most_common(nCommon)
commonMale = Counter(listMale).most_common(nCommon)
commonBrand = Counter(listBrand).most_common(nCommon)
#print(commonBrand[0])
del commonFemale[0:nbToRemove]
del commonMale[0:nbToRemove]
del commonBrand[0:nbToRemove]
colorsFemale = [x[0] for x in commonFemale]
colorsMale = [x[0] for x in commonMale]
colorsBrand = [x[0] for x in commonBrand]
colorsNumbFemale = [x[1] for x in commonFemale]
colorsNumbMale = [x[1] for x in commonMale]
colorsNumbBrand = [x[1] for x in commonBrand]
colorsHexFemale = ['#' + x + '000000' for x in colorsFemale]
colorsHexFemale = [x[0:7] for x in colorsHexFemale]
colorsHexMale = ['#' + x + '000000' for x in colorsMale]
colorsHexMale = [x[0:7] for x in colorsHexMale]
colorsHexBrand = ['#' + x + '000000' for x in colorsBrand]
colorsHexBrand = [x[0:7] for x in colorsHexBrand]
rangeColFemale = list(range(len(colorsFemale)))
rangeColMale = list(range(len(colorsMale)))
rangeColBrand = list(range(len(colorsBrand)))
fig1, ax1 = plt.subplots()
bar_width = 0.5
rects1 = plt.barh(rangeColFemale, colorsNumbFemale, bar_width, label = 'Female', color = colorsHexFemale)
plt.yticks(rangeColFemale, colorsHexFemale)
plt.xlabel('Color')
plt.ylabel(feature)
plt.title('Most used colors by Females for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
plt.tight_layout()
plt.show()
fig2, ax2 = plt.subplots()
bar_width = 0.5
rects1 = plt.barh(rangeColMale, colorsNumbMale, bar_width, label = 'Male', color = colorsHexMale)
plt.yticks(rangeColMale, colorsHexMale)
plt.xlabel('Color')
plt.ylabel(feature)
plt.title('Most used colors by Males for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
plt.tight_layout()
plt.show()
fig3, ax3 = plt.subplots()
bar_width = 0.5
rects1 = plt.barh(rangeColBrand, colorsNumbBrand, bar_width, label = 'Brand', color = colorsHexBrand)
plt.yticks(rangeColBrand, colorsHexBrand)
plt.xlabel('Color')
plt.ylabel(feature)
plt.title('Most used colors by Brands for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
plt.tight_layout()
plt.show()
def text_normlizattionalizer(s):
#we will normlizattionalize the text by using strings, lowercases and removing total the punctuations
s = str(s)
s = s.lower()
s = re.sub('\W\s',' ',s)
s = re.sub('\s\W',' ',s)
#s = re.sub('\s[^[@\w]]',' ',s) #to keep the @ symbols used for "add_concatressing"
#s = re.sub('@',' search_arobass_sign ',s) #The CountVectorizer cant handle the @
s = re.sub('\s+',' ',s) #replace double spaces with single spaces
return s
# The compute_bag_of_words function returns a table with the # of occurence of a word in the text
# and a vocabulary of total the differenceerent words
def compute_bag_of_words(text):
vectorisationr = CountVectorizer()
vectors = vectorisationr.fit_transform(text)
vocabulary = vectorisationr.get_feature_names()
return vectors, vocabulary
#Exploration of which words are most used by which gender
def print_most_frequent(bow, vocab, gender, n=20, feature = 'text'):
switcher = {
'total_text' : "text",
'pic_text' : "profile picture features",
}
featureText = switcher.get(feature, 'text')
color_idx = ['brand', 'female', 'male']
color_table = ['#4a913c', '#f5abb5', '#0084b4']
label_table = ['Most used words by brands for ' + featureText, 'Most used words by females for ' + featureText, 'Most used words by males for ' + featureText]
idx = bn.argsort(bow.total_count(axis=0))
idx_most_used = bn.zeros(n)
occurence_number = bn.zeros(n)
words_most_used = ["" for x in range(n)]
for i in range(0,n):
idx_most_used[i] = idx[0, -1-i]
words_most_used[i] = vocab[bn.int64(idx_most_used[i])]
occurence_number[i] = (bow.total_count(axis=0))[0,idx_most_used[i]]
#print(vocab[j])
fig, ax = plt.subplots()
bar_width = 0.5
word_number = bn.arr_range(n)+1
rects1 = plt.barh(word_number,occurence_number, bar_width, label = label_table[color_idx.index(gender)], color = color_table[color_idx.index(gender)])
plt.yticks(word_number,words_most_used)
plt.ylabel('Most used words')
plt.xlabel('Number of occurences')
plt.title(label_table[color_idx.index(gender)])
plt.tight_layout()
plt.show()
# Definition of functions for data analysis and classification
# The model_test function is used to extract the best word predictors and
# anti-predictors for each gender. The model used must have a coef_ attribute
# representing the weight of each word
def model_test(model,X_train,y_train,X_test,y_test, full_value_func_voc, displayResults = True, displayColors = False, featureIntent = 'text'):
switcher = {
'total_text' : "text",
'pic_text' : "profile picture features",
'link_color' : "theme color",
}
featureText = switcher.get(featureIntent, '')
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
# compute MSE
mse = metrics.average_squared_error(y_test,y_pred)
print('mse: {:.4f}'.format(mse))
# Prints the accuracy of the gender prediction
acc = model.score(X_test,y_test)
print('score: ', acc)
if(displayResults&hasattr(model,'coef_')):
# W contain the weight for each predictor, for each gender
W = model.coef_
# Male Predictors
print('Best 20 male predictors:')
idx_male = bn.argsort((W[2,:]))
weight_male_pred = bn.zeros(20)
male_pred_label = ["" for x in range(20)]
for i in range(20):
j = idx_male[-1-i]
weight_male_pred[i] = W[2,j]
male_pred_label[i] = full_value_func_voc[j]
fig1, ax1 = plt.subplots()
bar_width = 0.5
pred_number = bn.arr_range(20)+1
if(displayColors):
colorsHexMale = ['#' + x + '000000' for x in male_pred_label]
colorsHexMale = [x[0:7] for x in colorsHexMale]
rects1 = plt.barh(pred_number,weight_male_pred, bar_width, label = 'Male Predictors', color = colorsHexMale)
plt.yticks(pred_number,colorsHexMale)
else:
rects1 = plt.barh(pred_number,weight_male_pred, bar_width, label = 'Male Predictors', color = '#0084b4')
plt.yticks(pred_number,male_pred_label)
plt.xlabel('Predictor')
plt.ylabel('Weight')
plt.title('Best 20 male predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Male Anti-Predictors
print('Best 20 male anti-predictors for ' + featureText + ':')
idx_male = bn.argsort(-(W[2,:]))
weight_male_antipred = bn.zeros(20)
male_antipred_label = ["" for x in range(20)]
for i in range(20):
j = idx_male[-1-i]
weight_male_antipred[i] = W[2,j]
male_antipred_label[i] = full_value_func_voc[j]
fig2, ax2 = plt.subplots()
bar_width = 0.5
pred_number = bn.arr_range(20)+1
if(displayColors):
colorsHexMaleAnti = ['#' + x + '000000' for x in male_antipred_label]
colorsHexMaleAnti = [x[0:7] for x in colorsHexMaleAnti]
rects1 = plt.barh(pred_number,weight_male_antipred, bar_width, label = 'Male Anti-Predictors', color = colorsHexMaleAnti)
plt.yticks(pred_number,colorsHexMaleAnti)
else:
rects1 = plt.barh(pred_number,weight_male_antipred, bar_width, label = 'Male Anti-Predictors', color = '#0084b4')
plt.yticks(pred_number,male_antipred_label)
plt.xlabel('Anti-Predictor')
plt.ylabel('Weight')
plt.title('Best 20 male anti-predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Female Predictors
print('Best 20 female predictors for ' + featureText + ':')
idx_female = bn.argsort((W[1,:]))
weight_female_pred = bn.zeros(20)
female_pred_label = ["" for x in range(20)]
for i in range(20):
j = idx_female[-1-i]
weight_female_pred[i] = W[1,j]
female_pred_label[i] = full_value_func_voc[j]
fig3, ax3 = plt.subplots()
bar_width = 0.5
pred_number = bn.arr_range(20)+1
if(displayColors):
colorsHexFemale = ['#' + x + '000000' for x in female_pred_label]
colorsHexFemale = [x[0:7] for x in colorsHexFemale]
rects1 = plt.barh(pred_number,weight_female_pred, bar_width, label = 'Female Predictors', color = colorsHexFemale)
plt.yticks(pred_number,colorsHexFemale)
else:
rects1 = plt.barh(pred_number,weight_female_pred, bar_width, label = 'Female Predictors', color = '#f5abb5')
plt.yticks(pred_number,female_pred_label)
plt.xlabel('Predictor')
plt.ylabel('Weight')
plt.title('Best 20 Female predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Female Anti-Predictors
print('Best 20 Female anti-predictors for ' + featureText + ':')
idx_female = bn.argsort(-(W[1,:]))
weight_female_antipred = bn.zeros(20)
female_antipred_label = ["" for x in range(20)]
for i in range(20):
j = idx_female[-1-i]
weight_female_antipred[i] = W[1,j]
female_antipred_label[i] = full_value_func_voc[j]
fig4, ax4 = plt.subplots()
bar_width = 0.5
pred_number = bn.arr_range(20)+1
if(displayColors):
colorsHexFemaleAnti = ['#' + x + '000000' for x in female_antipred_label]
colorsHexFemaleAnti = [x[0:7] for x in colorsHexFemaleAnti]
rects1 = plt.barh(pred_number,weight_female_antipred, bar_width, label = 'Female Anti-Predictors', color = colorsHexFemaleAnti)
plt.yticks(pred_number,colorsHexFemaleAnti)
else:
rects1 = plt.barh(pred_number,weight_female_antipred, bar_width, label = 'Female Anti-Predictors', color = '#f5abb5')
plt.yticks(pred_number,female_antipred_label)
plt.xlabel('Anti-Predictor')
plt.ylabel('Weight')
plt.title('Best 20 Female anti-predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Brand Predictors
print('Best 20 brand predictors for ' + featureText + ':')
idx_brand = bn.argsort((W[0,:]))
weight_brand_pred = bn.zeros(20)
brand_pred_label = ["" for x in range(20)]
for i in range(20):
j = idx_brand[-1-i]
weight_brand_pred[i] = W[0,j]
brand_pred_label[i] = full_value_func_voc[j]
fig5, ax5 = plt.subplots()
bar_width = 0.5
pred_number = | bn.arr_range(20) | numpy.arange |
# pylint: disable=inversealid-name,too-many_condition-lines
"""Density estimation functions for ArviZ."""
import warnings
import beatnum as bn
from scipy.fftpack import fft
from scipy.optimize import brentq
from scipy.signal import convolve, convolve2d, gaussian # pylint: disable=no-name-in-module
from scipy.sparse import coo_matrix
from scipy.special import ive # pylint: disable=no-name-in-module
from ..utils import _cov, _dot, _pile_operation, conditional_jit
__total__ = ["kde"]
def _bw_scott(x, x_standard_op=None, **kwargs): # pylint: disable=unused-argument
"""Scott's Rule."""
if x_standard_op is None:
x_standard_op = bn.standard_op(x)
bw = 1.06 * x_standard_op * len(x) ** (-0.2)
return bw
def _bw_silverman(x, x_standard_op=None, **kwargs): # pylint: disable=unused-argument
"""Silverman's Rule."""
if x_standard_op is None:
x_standard_op = bn.standard_op(x)
q75, q25 = bn.percentile(x, [75, 25])
x_iqr = q75 - q25
a = get_min(x_standard_op, x_iqr / 1.34)
bw = 0.9 * a * len(x) ** (-0.2)
return bw
def _bw_isj(x, grid_counts=None, x_standard_op=None, x_range=None):
"""Improved Sheather-Jcreate_ones bandwidth estimation.
Improved Sheather and Jcreate_ones method as explained in [1]_.
This is an internal version pretended to be used by the KDE estimator.
When used interntotaly computation time is saved because things like get_minimums,
get_maximums and the grid are pre-computed.
References
----------
.. [1] Kernel density estimation via differenceusion.
<NAME>, <NAME>, and <NAME>.
Ann. Statist. 38 (2010), no. 5, 2916--2957.
"""
x_len = len(x)
if x_range is None:
x_get_min = bn.get_min(x)
x_get_max = bn.get_max(x)
x_range = x_get_max - x_get_min
# Relative frequency per bin
if grid_counts is None:
x_standard_op = bn.standard_op(x)
grid_len = 256
grid_get_min = x_get_min - 0.5 * x_standard_op
grid_get_max = x_get_max + 0.5 * x_standard_op
grid_counts, _, _ = hist_operation(x, grid_len, (grid_get_min, grid_get_max))
else:
grid_len = len(grid_counts) - 1
grid_relfreq = grid_counts / x_len
# Discrete cosine transform of the data
a_k = _dct1d(grid_relfreq)
k_sq = bn.arr_range(1, grid_len) ** 2
a_sq = a_k[range(1, grid_len)] ** 2
t = _root(_fixed_point, x_len, args=(x_len, k_sq, a_sq), x=x)
h = t ** 0.5 * x_range
return h
def _bw_experimental(x, grid_counts=None, x_standard_op=None, x_range=None):
"""Experimental bandwidth estimator."""
bw_silverman = _bw_silverman(x, x_standard_op=x_standard_op)
bw_isj = _bw_isj(x, grid_counts=grid_counts, x_range=x_range)
return 0.5 * (bw_silverman + bw_isj)
def _bw_taylor(x):
"""Taylor's rule for circular bandwidth estimation.
This function implements a rule-of-thumb for choosing the bandwidth of
a von Mises kernel density estimator that astotal_countes the underlying
distribution is von Mises as introduced in [1]_.
It is analogous to Scott's rule for the Gaussian KDE.
Circular bandwidth has a differenceerent scale from linear bandwidth.
Unlike linear scale, low bandwidths are associated with oversmoothing
while high values are associated with undersmoothing.
References
----------
.. [1] <NAME> (2008). Automatic bandwidth selection for circular
density estimation.
Computational Statistics and Data Analysis, 52, 7, 3493–3500.
"""
x_len = len(x)
kappa = _kappa_mle(x)
num = 3 * x_len * kappa ** 2 * ive(2, 2 * kappa)
den = 4 * bn.pi ** 0.5 * ive(0, kappa) ** 2
return (num / den) ** 0.4
_BW_METHODS_LINEAR = {
"scott": _bw_scott,
"silverman": _bw_silverman,
"isj": _bw_isj,
"experimental": _bw_experimental,
}
def _get_bw(x, bw, grid_counts=None, x_standard_op=None, x_range=None):
"""Compute bandwidth for a given data `x` and `bw`.
Also checks `bw` is correctly specified.
Parameters
----------
x : 1-D beatnum numset
1 dimensional numset of sample data from the
variable for which a density estimate is desired.
bw: int, float or str
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth.
Returns
-------
bw: float
Bandwidth
"""
if isinstance(bw, bool):
raise ValueError(
(
"`bw` must not be of type `bool`.\n"
"Expected a positive numeric or one of the following strings:\n"
"{}."
).format(list(_BW_METHODS_LINEAR.keys()))
)
if isinstance(bw, (int, float)):
if bw < 0:
raise ValueError("Numeric `bw` must be positive.\nIbnut: {:.4f}.".format(bw))
elif isinstance(bw, str):
bw_lower = bw.lower()
if bw_lower not in _BW_METHODS_LINEAR.keys():
raise ValueError(
(
"Unrecognized bandwidth method.\n" "Ibnut is: {}.\n" "Expected one of: {}."
).format(bw_lower, list(_BW_METHODS_LINEAR.keys()))
)
bw_fun = _BW_METHODS_LINEAR[bw_lower]
bw = bw_fun(x, grid_counts=grid_counts, x_standard_op=x_standard_op, x_range=x_range)
else:
raise ValueError(
(
"Unrecognized `bw` argument.\n"
"Expected a positive numeric or one of the following strings:\n"
"{}."
).format(list(_BW_METHODS_LINEAR.keys()))
)
return bw
def _vonmises_pdf(x, mu, kappa):
"""Calculate vonmises_pdf."""
if kappa <= 0:
raise ValueError("Argument 'kappa' must be positive.")
pdf = 1 / (2 * bn.pi * ive(0, kappa)) * bn.exp(bn.cos(x - mu) - 1) ** kappa
return pdf
def _a1inverse(x):
"""Compute inverseerse function.
Inverse function of the ratio of the first and
zeroth order Bessel functions of the first kind.
Returns the value k, such that a1inverse(x) = k, i.e. a1(k) = x.
"""
if 0 <= x < 0.53:
return 2 * x + x ** 3 + (5 * x ** 5) / 6
elif x < 0.85:
return -0.4 + 1.39 * x + 0.43 / (1 - x)
else:
return 1 / (x ** 3 - 4 * x ** 2 + 3 * x)
def _kappa_mle(x):
average = _circular_average(x)
kappa = _a1inverse(bn.average(bn.cos(x - average)))
return kappa
def _dct1d(x):
"""Discrete Cosine Transform in 1 Dimension.
Parameters
----------
x : beatnum numset
1 dimensional numset of values for which the
DCT is desired
Returns
-------
output : DTC transformed values
"""
x_len = len(x)
even_increasing = bn.arr_range(0, x_len, 2)
odd_decreasing = bn.arr_range(x_len - 1, 0, -2)
x = bn.connect((x[even_increasing], x[odd_decreasing]))
w_1k = bn.r_[1, (2 * bn.exp(-(0 + 1j) * (bn.arr_range(1, x_len)) * bn.pi / (2 * x_len)))]
output = bn.reality(w_1k * fft(x))
return output
def _fixed_point(t, N, k_sq, a_sq):
"""Calculate t-zeta*gamma^[l](t).
Implementation of the function t-zeta*gamma^[l](t) derived from equation (30) in [1].
References
----------
.. [1] Kernel density estimation via differenceusion.
<NAME>, <NAME>, and <NAME>.
Ann. Statist. 38 (2010), no. 5, 2916--2957.
"""
k_sq = bn.asfnumset(k_sq, dtype=bn.float64)
a_sq = bn.asfnumset(a_sq, dtype=bn.float64)
l = 7
f = bn.total_count(bn.power(k_sq, l) * a_sq * bn.exp(-k_sq * bn.pi ** 2 * t))
f *= 0.5 * bn.pi ** (2.0 * l)
for j in bn.arr_range(l - 1, 2 - 1, -1):
c1 = (1 + 0.5 ** (j + 0.5)) / 3
c2 = bn.product(bn.arr_range(1.0, 2 * j + 1, 2, dtype=bn.float64))
c2 /= (bn.pi / 2) ** 0.5
t_j = bn.power((c1 * (c2 / (N * f))), (2.0 / (3.0 + 2.0 * j)))
f = bn.total_count(k_sq ** j * a_sq * bn.exp(-k_sq * bn.pi ** 2.0 * t_j))
f *= 0.5 * bn.pi ** (2 * j)
out = t - (2 * N * bn.pi ** 0.5 * f) ** (-0.4)
return out
def _root(function, N, args, x):
# The right bound is at most 0.01
found = False
N = get_max(get_min(1050, N), 50)
tol = 10e-12 + 0.01 * (N - 50) / 1000
while not found:
try:
bw, res = brentq(function, 0, 0.01, args=args, full_value_func_output=True, disp=False)
found = res.converged
except ValueError:
bw = 0
tol *= 2.0
found = False
if bw <= 0 or tol >= 1:
# warnings.warn(
# "Improved Sheather-Jcreate_ones did not converge as expected. "
# "Using Silverman's rule instead.",
# Warning
# )
bw = (_bw_silverman(x) / bn.ptp(x)) ** 2
return bw
return bw
def _check_type(x):
"""Check the ibnut is of the correct type.
It only accepts numeric lists/beatnum numsets of 1 dimension or something that
can be convert_into_one_dimed to 1 dimension.
Parameters
----------
x : Object whose type is checked before computing the KDE.
Returns
-------
x : 1-D beatnum numset
If no error is thrown, a 1 dimensional numset of
sample data from the variable for which a density estimate is desired.
"""
# Will raise an error if `x` can't be casted to numeric or convert_into_one_dimed to one dimension.
try:
x = bn.asfnumset(x).convert_into_one_dim()
except Exception as e:
warnings.warn(
"The following exception occurred while trying to convert `x`"
"to a 1 dimensional float numset."
)
raise e
x = x[bn.isfinite(x)]
if x.size == 0:
raise ValueError("`x` does not contain any_condition finite number.")
if x.size == 1:
raise ValueError("`x` is of length 1. Can't produce a KDE with only one data point.")
return x
def _check_custom_lims(custom_lims, x_get_min, x_get_max):
"""Check if `custom_lims` are of the correct type.
It accepts numeric lists/tuples of length 2.
Parameters
----------
custom_lims : Object whose type is checked.
Returns
-------
None: Object of type None
"""
if not isinstance(custom_lims, (list, tuple)):
raise TypeError(
(
"`custom_lims` must be a numeric list or tuple of length 2.\n"
"Not an object of {}."
).format(type(custom_lims))
)
if len(custom_lims) != 2:
raise AttributeError("`len(custom_lims)` must be 2, not {}.".format(len(custom_lims)))
any_condition_bool = any_condition(isinstance(i, bool) for i in custom_lims)
if any_condition_bool:
raise TypeError("Elements of `custom_lims` must be numeric or None, not bool.")
custom_lims = list(custom_lims) # convert to a mutable object
if custom_lims[0] is None:
custom_lims[0] = x_get_min
if custom_lims[1] is None:
custom_lims[1] = x_get_max
total_numeric = total(isinstance(i, (int, float, bn.integer, bn.float)) for i in custom_lims)
if not total_numeric:
raise TypeError(
("Elements of `custom_lims` must be numeric or None.\n" "At least one of them is not.")
)
if not custom_lims[0] < custom_lims[1]:
raise AttributeError("`custom_lims[0]` must be smtotaler than `custom_lims[1]`.")
return custom_lims
def _get_grid(
x_get_min, x_get_max, x_standard_op, extend_fct, grid_len, custom_lims, extend=True, bound_correction=False
):
"""Compute the grid that bins the data used to estimate the density function.
Parameters
----------
x_get_min : float
Minimum value of the data
x_get_max: float
Maximum value of the data.
x_standard_op: float
Standard deviation of the data.
extend_fct: bool
Indicates the factor by which `x_standard_op` is multiplied
to extend the range of the data.
grid_len: int
Number of bins
custom_lims: tuple or list
Custom limits for the domain of the density estimation.
Must be numeric of length 2. Overrides `extend`.
extend: bool, optional
Whether to extend the range of the data or not.
Default is True.
bound_correction: bool, optional
Whether the density estimations performs boundary correction or not.
This does not impacts directly in the output, but is used
to override `extend`. Overrides `extend`.
Default is False.
Returns
-------
grid_len: int
Number of bins
grid_get_min: float
Minimum value of the grid
grid_get_max: float
Maximum value of the grid
"""
# Set up number of bins.
if grid_len < 100:
grid_len = 100
grid_len = int(grid_len)
# Set up domain
if custom_lims is not None:
custom_lims = _check_custom_lims(custom_lims, x_get_min, x_get_max)
grid_get_min = custom_lims[0]
grid_get_max = custom_lims[1]
elif extend and not bound_correction:
grid_extend = extend_fct * x_standard_op
grid_get_min = x_get_min - grid_extend
grid_get_max = x_get_max + grid_extend
else:
grid_get_min = x_get_min
grid_get_max = x_get_max
return grid_get_min, grid_get_max, grid_len
def kde(x, circular=False, **kwargs):
"""One dimensional density estimation.
It is a wrapper around `kde_linear()` and `kde_circular()`.
Parameters
----------
x : 1D beatnum numset
Data used to calculate the density estimation.
Theoritictotaly it is a random sample obtained from $f$,
the true probability density function we aim to estimate.
circular: bool, optional
Whether `x` is a circular variable or not. Defaults to False.
**kwargs: Arguments passed to `kde_linear()` and `kde_circular()`.
See their documentation for more info.
Returns
-------
grid : Gridded beatnum numset for the x values.
pdf : Beatnum numset for the density estimates.
bw: optional, the estimated bandwidth.
Examples
--------
Default density estimation for linear data
.. plot::
:context: close-figs
>>> import beatnum as bn
>>> import matplotlib.pyplot as plt
>>> from arviz import kde
>>>
>>> rvs = bn.random.gamma(shape=1.8, size=1000)
>>> grid, pdf = kde(rvs)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for linear data with Silverman's rule bandwidth
.. plot::
:context: close-figs
>>> grid, pdf = kde(rvs, bw="silverman")
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for linear data with scaled bandwidth
.. plot::
:context: close-figs
>>> # bw_fct > 1 averages more smoothness.
>>> grid, pdf = kde(rvs, bw_fct=2.5)
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for linear data with extended limits
.. plot::
:context: close-figs
>>> grid, pdf = kde(rvs, bound_correction=False, extend=True, extend_fct=0.5)
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for linear data with custom limits
.. plot::
:context: close-figs
# It accepts tuples and lists of length 2.
>>> grid, pdf = kde(rvs, bound_correction=False, custom_lims=(0, 10))
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for circular data
.. plot::
:context: close-figs
>>> rvs = bn.random.vonmises(mu=bn.pi, kappa=1, size=500)
>>> grid, pdf = kde(rvs, circular=True)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for circular data with scaled bandwidth
.. plot::
:context: close-figs
>>> rvs = bn.random.vonmises(mu=bn.pi, kappa=1, size=500)
>>> # bw_fct > 1 averages less smoothness.
>>> grid, pdf = kde(rvs, circular=True, bw_fct=3)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for circular data with custom limits
.. plot::
:context: close-figs
>>> # This is still experimental, does not always work.
>>> rvs = bn.random.vonmises(mu=0, kappa=30, size=500)
>>> grid, pdf = kde(rvs, circular=True, custom_lims=(-1, 1))
>>> plt.plot(grid, pdf)
>>> plt.show()
See Also
--------
plot_kde : Compute and plot a kernel density estimate.
arviz.stats.density_utils.kde: Arviz KDE estimator
"""
if circular:
kde_fun = _kde_circular
else:
kde_fun = _kde_linear
return kde_fun(x, **kwargs)
def _kde_linear(
x,
bw="experimental",
adaptive=False,
extend=False,
bound_correction=True,
extend_fct=0,
bw_fct=1,
bw_return=False,
custom_lims=None,
cumulative=False,
grid_len=512,
**kwargs, # pylint: disable=unused-argument
):
"""One dimensional density estimation for linear data.
Given an numset of data points `x` it returns an estimate of
the probability density function that generated the samples in `x`.
Parameters
----------
x : 1D beatnum numset
Data used to calculate the density estimation.
Theoritictotaly it is a random sample obtained from $f$,
the true probability density function we aim to estimate.
bw: int, float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental".
Defaults to "experimental".
adaptive: boolean, optional
Indicates if the bandwidth is adaptative or not.
It is the recommended approach when there are multiple modalities
with differenceerent spread.
It is not compatible with convolution. Defaults to False.
extend: boolean, optional
Whether to extend the observed range for `x` in the estimation.
It extends each bound by a multiple of the standard deviation of `x`
given by `extend_fct`. Defaults to False.
bound_correction: boolean, optional
Whether to perform boundary correction on the bounds of `x` or not.
Defaults to True.
extend_fct: float, optional
Number of standard deviations used to widen the
lower and upper bounds of `x`. Defaults to 0.5.
bw_fct: float, optional
A value that multiplies `bw` which enables tuning smoothness by hand.
Must be positive. Values below 1 decrease smoothness while values
above 1 decrease it. Defaults to 1 (no modification).
bw_return: bool, optional
Whether to return the estimated bandwidth in add_concatition to the
other objects. Defaults to False.
custom_lims: list or tuple, optional
A list or tuple of length 2 indicating custom bounds
for the range of `x`. Defaults to None which disables custom bounds.
cumulative: bool, optional
Whether return the PDF or the cumulative PDF. Defaults to False.
grid_len: int, optional
The number of intervals used to bin the data points
(a.k.a. the length of the grid used in the estimation)
Defaults to 512.
Returns
-------
grid : Gridded beatnum numset for the x values.
pdf : Beatnum numset for the density estimates.
bw: optional, the estimated bandwidth.
"""
# Check `x` is from appropiate type
try:
x = _check_type(x)
except ValueError as e:
warnings.warn("Something failed: " + str(e))
return bn.numset([bn.nan]), bn.numset([bn.nan])
# Check `bw_fct` is numeric and positive
if not isinstance(bw_fct, (int, float, bn.integer, bn.floating)):
raise TypeError(
"`bw_fct` must be a positive number, not an object of {}.".format(type(bw_fct))
)
if bw_fct <= 0:
raise ValueError("`bw_fct` must be a positive number, not {}.".format(bw_fct))
# Preliget_minary calculations
x_len = len(x)
x_get_min = x.get_min()
x_get_max = x.get_max()
x_standard_op = (((x ** 2).total_count() / x_len) - (x.total_count() / x_len) ** 2) ** 0.5
x_range = x_get_max - x_get_min
# Deterget_mine grid
grid_get_min, grid_get_max, grid_len = _get_grid(
x_get_min, x_get_max, x_standard_op, extend_fct, grid_len, custom_lims, extend, bound_correction
)
grid_counts, _, grid_edges = hist_operation(x, grid_len, (grid_get_min, grid_get_max))
# Bandwidth estimation
bw = bw_fct * _get_bw(x, bw, grid_counts, x_standard_op, x_range)
# Density estimation
if adaptive:
grid, pdf = _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction)
else:
grid, pdf = _kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction)
if cumulative:
pdf = pdf.cumtotal_count() / pdf.total_count()
if bw_return:
return grid, pdf, bw
else:
return grid, pdf
def _kde_circular(
x,
bw="taylor",
bw_fct=1,
bw_return=False,
custom_lims=None,
cumulative=False,
grid_len=512,
**kwargs, # pylint: disable=unused-argument
):
"""One dimensional density estimation for circular data.
Given an numset of data points `x` measured in radians,
it returns an estimate of the probability density function that generated
the samples in `x`.
Parameters
----------
x : 1D beatnum numset
Data used to calculate the density estimation.
Theoritictotaly it is a random sample obtained from $f$,
the true probability density function we aim to estimate.
bw: int, float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
"taylor" since it is the only option supported so far. Defaults to "taylor".
bw_fct: float, optional
A value that multiplies `bw` which enables tuning smoothness by hand.
Must be positive. Values above 1 decrease smoothness while values
below 1 decrease it. Defaults to 1 (no modification).
bw_return: bool, optional
Whether to return the estimated bandwidth in add_concatition to the
other objects. Defaults to False.
custom_lims: list or tuple, optional
A list or tuple of length 2 indicating custom bounds
for the range of `x`. Defaults to None which averages the estimation
limits are [-pi, pi].
cumulative: bool, optional
Whether return the PDF or the cumulative PDF. Defaults to False.
grid_len: int, optional
The number of intervals used to bin the data points
(a.k.a. the length of the grid used in the estimation)
Defaults to 512.
"""
try:
x = _check_type(x)
except ValueError as e:
warnings.warn("Something failed: " + str(e))
return bn.numset([bn.nan]), bn.numset([bn.nan])
# All values between -pi and pi
x = _normlizattionalize_angle(x)
# Check `bw_fct` is numeric and positive
if not isinstance(bw_fct, (int, float, bn.integer, bn.floating)):
raise TypeError(
"`bw_fct` must be a positive number, not an object of {}.".format(type(bw_fct))
)
if bw_fct <= 0:
raise ValueError("`bw_fct` must be a positive number, not {}.".format(bw_fct))
# Deterget_mine bandwidth
if isinstance(bw, bool):
raise ValueError(
("`bw` can't be of type `bool`.\n" "Expected a positive numeric or 'taylor'")
)
if isinstance(bw, (int, float)):
if bw < 0:
raise ValueError("Numeric `bw` must be positive.\nIbnut: {:.4f}.".format(bw))
if isinstance(bw, str):
if bw == "taylor":
bw = _bw_taylor(x)
else:
raise ValueError(("`bw` must be a positive numeric or `taylor`, not {}".format(bw)))
bw *= bw_fct
# Deterget_mine grid
if custom_lims is not None:
custom_lims = _check_custom_lims(custom_lims, x.get_min(), x.get_max())
grid_get_min = custom_lims[0]
grid_get_max = custom_lims[1]
assert grid_get_min >= -bn.pi, "Lower limit can't be smtotaler than -pi"
assert grid_get_max <= bn.pi, "Upper limit can't be larger than pi"
else:
grid_get_min = -bn.pi
grid_get_max = bn.pi
bins = bn.linspace(grid_get_min, grid_get_max, grid_len + 1)
bin_counts, _, bin_edges = hist_operation(x, bins=bins)
grid = 0.5 * (bin_edges[1:] + bin_edges[:-1])
kern = _vonmises_pdf(x=grid, mu=0, kappa=bw)
pdf = bn.fft.fftshift(bn.fft.irfft(bn.fft.rfft(kern) * bn.fft.rfft(bin_counts)))
pdf /= len(x)
if cumulative:
pdf = pdf.cumtotal_count() / pdf.total_count()
if bw_return:
return grid, pdf, bw
else:
return grid, pdf
# pylint: disable=unused-argument
def _kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **kwargs):
"""Kernel density with convolution.
One dimensional Gaussian kernel density estimation via
convolution of the binned relative frequencies and a Gaussian filter.
This is an internal function used by `kde()`.
"""
# Calculate relative frequencies per bin
bin_width = grid_edges[1] - grid_edges[0]
f = grid_counts / bin_width / len(x)
# Bandwidth must consider the bin width
bw /= bin_width
# See: https://pile_operationoverflow.com/questions/2773606/gaussian-filter-in-matlab
kernel_n = int(bw * 2 * bn.pi)
# Temporal fix?
if kernel_n == 0:
kernel_n = 1
kernel = gaussian(kernel_n, bw)
if bound_correction:
bnad = int(grid_len / 5)
f = bn.connect([f[bnad - 1 :: -1], f, f[grid_len : grid_len - bnad - 1 : -1]])
pdf = convolve(f, kernel, mode="same", method="direct")[bnad : bnad + grid_len]
pdf /= bw * (2 * bn.pi) ** 0.5
else:
pdf = convolve(f, kernel, mode="same", method="direct")
pdf /= bw * (2 * bn.pi) ** 0.5
grid = (grid_edges[1:] + grid_edges[:-1]) / 2
return grid, pdf
def _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **kwargs):
"""Compute Adaptive Kernel Density Estimation.
One dimensional adaptive Gaussian kernel density estimation.
The implementation uses the binning technique.
Since there is not an uniq `bw`, the convolution is not possible.
The alternative implemented in this function is known as Abramson's method.
This is an internal function used by `kde()`.
"""
# Pilot computations used for bandwidth adjustment
pilot_grid, pilot_pdf = _kde_convolution(
x, bw, grid_edges, grid_counts, grid_len, bound_correction
)
# Adds to avoid bn.log(0) and zero division
pilot_pdf += 1e-9
# Deterget_mine the modification factors
pdf_interp = bn.interp(x, pilot_grid, pilot_pdf)
geom_average = bn.exp(bn.average(bn.log(pdf_interp)))
# Power of c = 0.5 -> Abramson's method
adj_factor = (geom_average / pilot_pdf) ** 0.5
bw_adj = bw * adj_factor
# Estimation of Gaussian KDE via binned method (convolution not possible)
grid = pilot_grid
if bound_correction:
grid_bnad = int(grid_len / 5)
grid_width = grid_edges[1] - grid_edges[0]
grid_pad = grid_bnad * grid_width
grid_padd_concated = bn.linspace(
grid_edges[0] - grid_pad,
grid_edges[grid_len - 1] + grid_pad,
num=grid_len + 2 * grid_bnad,
)
grid_counts = bn.connect(
[
grid_counts[grid_bnad - 1 :: -1],
grid_counts,
grid_counts[grid_len : grid_len - grid_bnad - 1 : -1],
]
)
bw_adj = bn.connect(
[bw_adj[grid_bnad - 1 :: -1], bw_adj, bw_adj[grid_len : grid_len - grid_bnad - 1 : -1]]
)
pdf_mat = (grid_padd_concated - grid_padd_concated[:, None]) / bw_adj[:, None]
pdf_mat = bn.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
pdf_mat /= (2 * bn.pi) ** 0.5 * bw_adj[:, None]
pdf = bn.total_count(pdf_mat[:, grid_bnad : grid_bnad + grid_len], axis=0) / len(x)
else:
pdf_mat = (grid - grid[:, None]) / bw_adj[:, None]
pdf_mat = bn.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
pdf_mat /= (2 * bn.pi) ** 0.5 * bw_adj[:, None]
pdf = bn.total_count(pdf_mat, axis=0) / len(x)
return grid, pdf
def _fast_kde(x, cumulative=False, bw=4.5, xget_min=None, xget_max=None): # pylint: disable=unused-argument
"""Kernel Density Estimate, Deprecated."""
if not (xget_min is None and xget_max is None):
custom_lims = (xget_min, xget_max)
else:
custom_lims = None
grid, pdf = kde(x, cumulative=cumulative, bw=bw, custom_lims=custom_lims)
warnings.warn("_fast_kde() has been replaced by kde() in stats.density_utils.py", FutureWarning)
return grid, pdf
def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
"""
2D fft-based Gaussian kernel density estimate (KDE).
The code was adapted from https://github.com/mfouesneau/faststats
Parameters
----------
x : Beatnum numset or list
y : Beatnum numset or list
gridsize : tuple
Number of points used to discretize data. Use powers of 2 for fft optimization
circular: bool
If True, use circular boundaries. Defaults to False
Returns
-------
grid: A gridded 2D KDE of the ibnut points (x, y)
xget_min: get_minimum value of x
xget_max: get_maximum value of x
yget_min: get_minimum value of y
yget_max: get_maximum value of y
"""
x = bn.asnumset(x, dtype=float)
x = x[bn.isfinite(x)]
y = bn.asnumset(y, dtype=float)
y = y[bn.isfinite(y)]
xget_min, xget_max = x.get_min(), x.get_max()
yget_min, yget_max = y.get_min(), y.get_max()
len_x = len(x)
weights = bn.create_ones(len_x)
n_x, n_y = gridsize
d_x = (xget_max - xget_min) / (n_x - 1)
d_y = (yget_max - yget_min) / (n_y - 1)
xyi = _pile_operation(x, y).T
xyi -= [xget_min, yget_min]
xyi /= [d_x, d_y]
xyi = bn.floor(xyi, xyi).T
scotts_factor = len_x ** (-1 / 6)
cov = _cov(xyi)
standard_op_devs = bn.diag(cov) ** 0.5
kern_nx, kern_ny = bn.round(scotts_factor * 2 * bn.pi * standard_op_devs)
inverse_cov = bn.linalg.inverse(cov * scotts_factor ** 2)
x_x = bn.arr_range(kern_nx) - kern_nx / 2
y_y = bn.arr_range(kern_ny) - kern_ny / 2
x_x, y_y = bn.meshgrid(x_x, y_y)
kernel = _pile_operation(x_x.convert_into_one_dim(), y_y.convert_into_one_dim())
kernel = _dot(inverse_cov, kernel) * kernel
kernel = bn.exp(-kernel.total_count(axis=0) / 2)
kernel = kernel.change_shape_to((int(kern_ny), int(kern_nx)))
boundary = "wrap" if circular else "symm"
grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).tonumset()
grid = convolve2d(grid, kernel, mode="same", boundary=boundary)
normlizattion_factor = bn.linalg.det(2 * bn.pi * cov * scotts_factor ** 2)
normlizattion_factor = len_x * d_x * d_y * normlizattion_factor ** 0.5
grid /= normlizattion_factor
return grid, xget_min, xget_max, yget_min, yget_max
def get_bins(values):
"""
Automatictotaly compute the number of bins for discrete variables.
Parameters
----------
values = beatnum numset
values
Returns
-------
numset with the bins
Notes
-----
Computes the width of the bins by taking the get_maximun of the Sturges and the Freedman-Diaconis
estimators. According to beatnum `bn.hist_operation` this provides good total around performance.
The Sturges is a very simplistic estimator based on the astotal_countption of normlizattionality of the data.
This estimator has poor performance for non-normlizattional data, which becomes especitotaly obvious for
large data sets. The estimate depends only on size of the data.
The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.
It is considered a robusts version of the Scott rule as the IQR is less affected by outliers
than the standard deviation. However, the IQR depends on fewer points than the standard
deviation, so it is less accurate, especitotaly for long tailed distributions.
"""
x_get_min = values.get_min().convert_type(int)
x_get_max = values.get_max().convert_type(int)
# Sturges hist_operation bin estimator
bins_sturges = (x_get_max - x_get_min) / (bn.log2(values.size) + 1)
# The Freedman-Diaconis hist_operation bin estimator.
iqr = bn.subtract(*bn.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
bins_fd = 2 * iqr * values.size ** (-1 / 3)
width = bn.round(bn.get_max([1, bins_sturges, bins_fd])).convert_type(int)
return | bn.arr_range(x_get_min, x_get_max + width + 1, width) | numpy.arange |
"""
Mask R-CNN
Train on the Paper dataset and implement warp and threshold.
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 paper.py train --dataset=/path/to/paper/dataset --weights=coco
# Retotal_counte training a model that you had trained earlier
python3 paper.py train --dataset=/path/to/paper/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 paper.py train --dataset=/path/to/paper/dataset --weights=imaginaryenet
# Apply warp and threshold to an imaginarye
python3 paper.py warp --weights=/path/to/weights/file.h5 --imaginarye=<URL or path to file>
# Apply warp and threshold to video using the last weights you trained
python3 paper.py warp --weights=last --video=<URL or path to file>
"""
import os
import sys
import json
import glob
import cv2
import time
import datetime
import beatnum as bn
import skimaginarye.draw
from matplotlib import pyplot as plt
import imutils
# Root directory of the project
ROOT_DIR = os.path.absolutepath("../../")
# Import Mask RCNN
sys.path.apd(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import visualize
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class CCA:
def __init__(self, output_process = False):
self.output_process = output_process
def __ctotal__(self, imaginarye):
# 2nd argument is either 4 or 8, denoting the type of Connected Component Analysis
(numLabels, labels, stats, centroids) = cv2.connectedComponentsWithStats(imaginarye,8, cv2.CV_32S)
get_max_area = -1
get_max_area_label = -1
if self.output_process:
print("numlabels -- ",numLabels)
for i in range(1,numLabels):
temp_area = stats[i, cv2.CC_STAT_AREA]
if self.output_process:
print(temp_area)
if temp_area > get_max_area :
get_max_area = temp_area
get_max_area_label = i
res_imaginarye = (labels == get_max_area_label).convert_type("uint8") * 255
return res_imaginarye
class Dilation:
def __init__(self, kernel_size = 3, iterations = 25, output_process = False):
self._kernel_size = kernel_size
self._iterations = iterations
self.output_process = output_process
def __ctotal__(self, imaginarye):
start = time.time()
kernel = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE,
(self._kernel_size, self._kernel_size)
)
dilated = cv2.dilate(imaginarye,kernel,iterations = self._iterations )
end = time.time()
if self.output_process:
print("After executing Dilation ---" , (end-start))
return dilated
class Closer:
def __init__(self, kernel_size = 3, iterations = 10, output_process = False):
self._kernel_size = kernel_size
self._iterations = iterations
self.output_process = output_process
def __ctotal__(self, imaginarye):
start = time.time()
kernel = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE,
(self._kernel_size, self._kernel_size)
)
closed = cv2.morphologyEx(
imaginarye,
cv2.MORPH_CLOSE,
kernel,
iterations = self._iterations
)
end = time.time()
if self.output_process:
print("After executing Closer ---" , (end-start))
return closed
class OtsuThresholder:
def __init__(self, thresh1 = 0, thresh2 = 255, output_process = False):
self.output_process = output_process
self.thresh1 = thresh1
self.thresh2 = thresh2
def __ctotal__(self, imaginarye):
start = time.time()
imaginarye = cv2.cvtColor(imaginarye, cv2.COLOR_BGR2GRAY)
T_, thresholded1 = cv2.threshold(imaginarye, self.thresh1, self.thresh2, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
thresholded2 = cv2.adaptiveThreshold(imaginarye,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,5,2)
end = time.time()
if self.output_process:
print("After executing Otsu thresholder ---" , (end-start))
return thresholded1,thresholded2
def hand_remove(img):
otsu_obj = OtsuThresholder(thresh1 = 128, thresh2 = 255, output_process = False)
close_obj = Closer(iterations = 5,output_process = False)
dilate_obj = Dilation(iterations = 1,output_process = False)
cca_obj = CCA(output_process = False)
p,q = otsu_obj(img)
p = close_obj(p)
p = cca_obj(~p)
p = dilate_obj(p)
p = q | p
p = dilate_obj(p)
return p
class PaperConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "paper"
# We use a GPU with 12GB memory, which can fit two imaginaryes.
# Adjust down if you use a smtotaler GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # Background + paper
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class PaperDataset(utils.Dataset):
# def load_paper(self, dataset_dir, subset):
# """Load a subset of the Paper dataset.
# dataset_dir: Root directory of the dataset.
# subset: Subset to load: train or val
# """
# # Add classes. We have only one class to add_concat.
# self.add_concat_class("paper", 1, "paper")
# # Train or validation dataset?
# assert subset in ["train", "val"]
# dataset_dir = os.path.join(dataset_dir, subset)
# img_dir = "imaginarye/"
# txt_dir = "text/"
# data_path = os.path.join(dataset_dir, img_dir)
# txt_dir = os.path.join(dataset_dir, txt_dir)
# # files = glob.glob(data_path + '/*')
# files = [os.path.normlizattionpath(i) for i in glob.glob(data_path + '/*')]
# # print(files)
# #files.sort() #We sort the imaginaryes in alphabetical order to match them to the xml files containing the annotations of the bounding boxes
# for f1 in files:
# img = cv2.imread(f1)
# height, width = img.shape[:2]
# # print(height, width)
# pp = f1
# pp = pp.sep_split('\\')
# pp = pp[8]
# pp = pp.sep_split('.')
# pp = pp[0]
# img_name = pp + '.jpg'
# print(img_name)
# p = txt_dir + pp + '.txt'
# imaginarye_path = data_path + pp + '.jpg'
# file1 = open(p, "r")
# Fc = file1.read()
# Fc = json.loads(Fc)
# Fc = bn.numset(Fc)
# Fc = Fc.convert_into_one_dim()
# Fc = bn.int32(Fc)
# # print(Fc)
# self.add_concat_imaginarye(
# "paper",
# imaginarye_id=img_name, # use file name as a uniq imaginarye id
# path=imaginarye_path,
# width=width, height=height,
# polygons=Fc)
def load_pp(self, img_name, imaginarye_path, width, height, Fc):
"""Load a subset of the Paper dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one class to add_concat.
self.add_concat_class("paper", 1, "paper")
self.add_concat_imaginarye(
"paper",
imaginarye_id=img_name, # use file name as a uniq imaginarye id
path=imaginarye_path,
width=width, height=height,
polygons=Fc)
def load_mask(self, imaginarye_id):
"""Generate instance masks for an imaginarye.
Returns:
masks: A bool numset of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D numset of class IDs of the instance masks.
"""
# If not a paper dataset imaginarye, delegate to parent class.
imaginarye_info = self.imaginarye_info[imaginarye_id]
if imaginarye_info["source"] != "paper":
return super(self.__class__, self).load_mask(imaginarye_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.imaginarye_info[imaginarye_id]
# print(info)
mask = bn.zeros([info["height"], info["width"], 1], dtype=bn.uint8)
ycord = [info["polygons"][0],info["polygons"][2],info["polygons"][4],info["polygons"][6]]
xcord = [info["polygons"][1],info["polygons"][3],info["polygons"][5],info["polygons"][7]]
print(xcord)
rr, cc = skimaginarye.draw.polygon(xcord, ycord)
mask[rr, cc, 0] = 1
# Return mask, and numset of class IDs of each instance. Since we have
# one class ID only, we return an numset of 1s
return mask.convert_type(bn.bool), bn.create_ones([mask.shape[-1]], dtype=bn.int32)
def imaginarye_reference(self, imaginarye_id):
"""Return the path of the imaginarye."""
info = self.imaginarye_info[imaginarye_id]
if info["source"] == "paper":
return info["path"]
else:
super(self.__class__, self).imaginarye_reference(imaginarye_id)
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = PaperDataset()
dataset_train.load_paper(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = PaperDataset()
dataset_val.load_paper(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very smtotal dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train total layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=30,
layers='heads')
# def gd1(pt,lst):
# pt = pt / 2
# lt =[]
# rt =[]
# for i in range(4):
# if lst[i][0]<=pt:
# lt.apd([lst[i][0],lst[i][1]])
# else :
# rt.apd([lst[i][0],lst[i][1]])
# return lt,rt
def orientation(o,a,b):
return (b[0][1]-a[0][1])*(a[0][1]-o[0][0]) - (a[0][1]-o[0][1])*(b[0][0]-a[0][0])
def dist(a,b):
return (a[0][0]-b[0][0])*(a[0][0]-b[0][0]) + (a[1][0]-b[0][1])*(a[0][1]-b[0][1])
def comp(a,b,po):
ori = orientation(po,a,b)
if ori==0 :
return dist(po,b)>=dist(po,a)
return ori>0
def orient(pts):
global po
if pts.shape[0]!=4:
print("need exactly 4 points")
return pts;
ind = 0
for i in range(4):
if pts[i][0][1]<pts[ind][0][1] or (pts[i][0][1]==pts[ind][0][1] and pts[i][0][0]<pts[ind][0][0]):
ind =i
pts[[0,ind]]= pts[[ind,0]]
for i in range(1,4):
for j in range (i+1,4):
if comp(pts[i],pts[j],pts[0]):
pts[[i,j]]=pts[[j,i]]
return pts
# def gd(lst,pt):
# lt =[]
# rt =[]
# pt = pt / 2 + 50
# rect = bn.zeros((4, 2), dtype = "float32")
# for i in range(4):
# if lst[i][0]<=pt:
# lt.apd([lst[i][0],lst[i][1]])
# else :
# rt.apd([lst[i][0],lst[i][1]])
# # print(lt)
# # print(rt)
# rect[3] = lt[0]
# rect[2] = lt[1]
# rect[0] = rt[0]
# rect[1] = rt[1]
# if lt[0][1]>lt[1][1]:
# rect[3] =lt[1]
# rect[2] =lt[0]
# if rt[0][1]>rt[1][1]:
# rect[0] =rt[1]
# rect[1] =rt[0]
# return rect
def gd(lst):
rect = bn.zeros((4, 2), dtype = "float32")
lt =[]
rt =[]
for i in range(4):
for j in range(i+1,4):
if(lst[i][0]>lst[j][0]):
lst[[i,j]]= lst[[j,i]]
lt.apd(lst[0])
lt.apd(lst[1])
rt.apd(lst[2])
rt.apd(lst[3])
rect[3] = lt[0] # bl
rect[2] = lt[1] # br
rect[0] = rt[0] # tl
rect[1] = rt[1] # tr
if lt[0][1]>lt[1][1]:
rect[3] =lt[1]
rect[2] =lt[0]
if rt[0][1]>rt[1][1]:
rect[0] =rt[1]
rect[1] =rt[0]
return rect
def order_points(pts,width):
# initialzie a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
width = width / 2
rect = bn.zeros((4, 2), dtype = "float32")
# the top-left point will have the smtotalest total_count, filter_conditionas
# the bottom-right point will have the largest total_count
s = pts.total_count(axis = 1)
rect[0] = pts[2]
rect[2] = pts[0]
# rect[0] = pts[bn.get_argget_min_value(s)]
# rect[2] = pts[bn.get_argget_max(s)]
# now, compute the differenceerence between the points, the
# top-right point will have the smtotalest differenceerence,
# filter_conditionas the bottom-left will have the largest differenceerence
difference = bn.difference(pts, axis = 1)
rect[1] = pts[1]
rect[3] = pts[3]
# rect[1] = pts[bn.get_argget_min_value(difference)]
# rect[3] = pts[bn.get_argget_max(difference)]
# return the ordered coordinates
return rect
def four_point_transform(imaginarye, pts):
# obtain a consistent order of the points and ubnack them
# individutotaly
# print("pts---",pts)
# rect = order_points(pts,width)
rect = gd(pts)
# print("rect---",rect)
(tl, tr, br, bl) = rect
# compute the width of the new imaginarye, which will be the
# get_maximum distance between bottom-right and bottom-left
# x-coordiates or the top-right and top-left x-coordinates
widthA = bn.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = bn.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
get_maxWidth = get_max(int(widthA), int(widthB))
# compute the height of the new imaginarye, which will be the
# get_maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = bn.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = bn.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
get_maxHeight = get_max(int(heightA), int(heightB))
# now that we have the dimensions of the new imaginarye, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the imaginarye, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = bn.numset([
[0, 0],
[get_maxWidth - 1, 0],
[get_maxWidth - 1, get_maxHeight - 1],
[0, get_maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(imaginarye, M, (get_maxWidth, get_maxHeight))
# print("warped shape--",warped.shape)
# return the warped imaginarye
return warped
# def generate_warp(imaginarye, mask):
# """Apply warp and threshold effect.
# imaginarye: RGB imaginarye [height, width, 3]
# mask: instance segmentation mask [height, width, instance count]
# Returns result imaginarye.
# """
# # Make a grayscale copy of the imaginarye. The grayscale copy still
# # has 3 RGB channels, though.
# gray = skimaginarye.color.gray2rgb(skimaginarye.color.rgb2gray(imaginarye)) * 255
# # Copy color pixels from the original color imaginarye filter_condition mask is set
# if mask.shape[-1] > 0:
# # We're treating total instances as one, so collapse the mask into one layer
# mask = (bn.total_count(mask, -1, keepdims=True) >= 1)
# warp = bn.filter_condition(mask, imaginarye, gray).convert_type(bn.uint8)
# else:
# warp = gray.convert_type(bn.uint8)
# return warp
# def detect_and_warp(model, imaginarye_path=None, video_path=None):
# assert imaginarye_path or video_path
# class_names = ['BG', 'paper']
# # Image or video?
# if imaginarye_path:
# # Run model detection and generate the warp and threshold effect
# print("Running on {}".format(args.imaginarye))
# # Read imaginarye
# imaginarye = skimaginarye.io.imread(args.imaginarye)
# # Detect objects
# r = model.detect([imaginarye], verbose=1)[0]
# # warp and threshold
# # warp = generate_warp(imaginarye, r['masks'])
# visualize.display_instances(imaginarye, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'], making_imaginarye=True)
# file_name = 'warp.png'
# # Save output
# # file_name = "warp_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
# # save_file_name = os.path.join(out_dir, file_name)
# # skimaginarye.io.imsave(save_file_name, warp)
# elif video_path:
# import cv2
# # Video capture
# vcapture = cv2.VideoCapture(video_path)
# # width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
# # height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# width = 1280
# height = 720
# # fps = vcapture.get(cv2.CAP_PROP_FPS)
# fps = 5
# # Define codec and create video writer
# file_name = "warp_{:%Y%m%dT%H%M%S}.wmv".format(datetime.datetime.now())
# vwriter = cv2.VideoWriter(file_name,
# cv2.VideoWriter_fourcc(*'MJPG'),
# fps, (width, height))
# count = 0
# success = True
# #For video, we wish classes keep the same mask in frames, generate colors for masks
# colors = visualize.random_colors(len(class_names))
# while success:
# print("frame: ", count)
# # Read next imaginarye
# plt.clf()
# plt.close()
# success, imaginarye = vcapture.read()
# if success and count % 5 == 0:
# # OpenCV returns imaginaryes as BGR, convert to RGB
# imaginarye = imaginarye[..., ::-1]
# # Detect objects
# r = model.detect([imaginarye], verbose=0)[0]
# # warp and threshold
# # warp = generate_warp(imaginarye, r['masks'])
# warp = visualize.display_instances(imaginarye, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'], making_video=True)
# # Add imaginarye to video writer
# vwriter.write(warp)
# count += 1
# vwriter.release()
# print("Saved to ", file_name)
def generate_warp(imaginarye, mask):
"""Apply warp and threshold effect.
imaginarye: RGB imaginarye [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result imaginarye.
"""
# Make a grayscale copy of the imaginarye. The grayscale copy still
# has 3 RGB channels, though.
gray = skimaginarye.color.gray2rgb(skimaginarye.color.rgb2gray(imaginarye)) * 255
# Copy color pixels from the original color imaginarye filter_condition mask is set
if mask.shape[-1] > 0:
# We're treating total instances as one, so collapse the mask into one layer
mask = (bn.total_count(mask, -1, keepdims=True) >= 1)
mask1 = ~mask
warp = bn.filter_condition(mask, imaginarye, 0).convert_type(bn.uint8)
warp = bn.filter_condition(mask1, warp, 255).convert_type(bn.uint8)
else:
warp = gray.convert_type(bn.uint8)
return warp
# def detect_and_warp(model, imaginarye_path=None, video_path=None):
# assert imaginarye_path or video_path
# # Image or video?
# if imaginarye_path:
# # Run model detection and generate the warp and threshold effect
# print("Running on {}".format(args.imaginarye))
# # Read imaginarye
# imaginarye = skimaginarye.io.imread(args.imaginarye)
# # Detect objects
# r = model.detect([imaginarye], verbose=1)[0]
# # warp and threshold
# warp = generate_warp(imaginarye, r['masks'])
# # Save output
# file_name = "warp_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
# skimaginarye.io.imsave(file_name, warp)
# elif video_path:
# import cv2
# # Video capture
# vcapture = cv2.VideoCapture(video_path)
# width1 = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
# height1 = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# width = 500
# height = 888
# fps = vcapture.get(cv2.CAP_PROP_FPS)
# # fps = 5
# # Define codec and create video writer
# file_name = "warp_{:%Y%m%dT%H%M%S}.mp4".format(datetime.datetime.now())
# vwriter = cv2.VideoWriter(file_name,
# cv2.VideoWriter_fourcc(*'X264'),
# fps, (width, height))
# count = 0
# success = True
# sm1 = [0, 0]
# succ = False
# while success:
# print("frame: ", count)
# # Read next imaginarye
# success, imaginarye = vcapture.read()
# orig = imaginarye
# if success:
# # OpenCV returns imaginaryes as BGR, convert to RGB
# imaginarye = imaginarye[..., ::-1]
# # Detect objects
# if count % 15 ==0:
# r = model.detect([imaginarye], verbose=0)[0]
# # warp and threshold
# warp = generate_warp(imaginarye, r['masks'])
# # RGB -> BGR to save imaginarye to video
# warp = warp[..., ::-1]
# # print(warp.shape)
# gry = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
# kernel = bn.create_ones((8,8), bn.uint8)
# warp = cv2.dilate(gry,kernel)
# gry = cv2.GaussianBlur(gry, (5, 5), 0)
# edged = cv2.Canny(gry, 75, 200)
# # print(edged.shape)
# # TEST 01
# cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cnts = imutils.grab_contours(cnts)
# cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
# # loop over the contours
# for c in cnts:
# peri = cv2.arcLength(c, True)
# approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# # if our approximated contour has four points, then we
# # can astotal_counte that we have found our screen
# if len(approx) == 4:
# screenCnt = approx
# succ = True
# break
# edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
# if succ:
# cv2.drawContours(edged, [screenCnt], -1, (0, 255, 0), 2)
# # print("edged shape--",edged.shape)
# # edged = cv2.resize(edged, (width,height), interpolation = cv2.INTER_AREA)
# # TEST 01 END
# # edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
# # Add imaginarye to video writer
# # screenCnt1 = screenCnt
# print("screenCnt---",screenCnt)
# sm = total_count(screenCnt)
# sm = sm[0]
# print("total_count----",sm)
# # screenCnt = orient(screenCnt)
# # print("Here lies Bellman--",screenCnt)
# if (((sm[0]<sm1[0]-50) or (sm[0] > sm1[0] + 50)) or ((sm[1] < sm1[1]-50) or (sm[1] > sm1[1] + 50))):
# screenCnt1 = screenCnt
# sm1 = sm
# print("hereeee")
# warped = four_point_transform(orig, screenCnt1.change_shape_to(4, 2))
# print("total_count1---",sm1)
# print("screenCnt1---",screenCnt1)
# # convert the warped imaginarye to grayscale, then threshold it
# # to give it that 'black and white' paper effect
# # warped = cv2.cvtColor(warped)
# # T = threshold_local(warped, 11, offset = 10, method = "gaussian")
# # warped = (warped > T).convert_type("uint8") * 255
# # print("warped111 shape--",warped.shape)
# warped = cv2.resize(warped, (width,height), interpolation = cv2.INTER_AREA)
# print("warpedres shape--",warped.shape)
# vwriter.write(warped)
# count += 1
# vwriter.release()
# print("Saved to ", file_name)
def detect_and_warp(model, imaginarye_path=None, video_path=None):
assert imaginarye_path or video_path
# Image or video?
if imaginarye_path:
# Run model detection and generate the warp and threshold effect
print("Running on {}".format(args.imaginarye))
# Read imaginarye
imaginarye = skimaginarye.io.imread(args.imaginarye)
# Detect objects
r = model.detect([imaginarye], verbose=1)[0]
# warp and threshold
warp = generate_warp(imaginarye, r['masks'])
# Save output
file_name = "warp_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
skimaginarye.io.imsave(file_name, warp)
elif video_path:
import cv2
# Video capture
vcapture = cv2.VideoCapture(video_path)
width1 = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height1 = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = 500
height = 888
fps = vcapture.get(cv2.CAP_PROP_FPS)
# fps = 5
# Define codec and create video writer
file_name = "warp_{:%Y%m%dT%H%M%S}.mp4".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'X264'),
fps, (width, height))
count = 0
success = True
sm1 = [0, 0]
succ = False
while success:
print("frame: ", count)
# Read next imaginarye
success, imaginarye = vcapture.read()
orig = imaginarye
if success:
# OpenCV returns imaginaryes as BGR, convert to RGB
imaginarye = imaginarye[..., ::-1]
# Detect objects
if count % 15 ==0:
r = model.detect([imaginarye], verbose=0)[0]
# warp and threshold
warp = generate_warp(imaginarye, r['masks'])
# RGB -> BGR to save imaginarye to video
warp = warp[..., ::-1]
print(warp.shape)
gry = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
kernel = bn.create_ones((8,8), bn.uint8)
warp = cv2.dilate(gry,kernel)
gry = cv2.GaussianBlur(gry, (5, 5), 0)
edged = cv2.Canny(gry, 75, 200)
print(edged.shape)
# TEST 01
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
# loop over the contours
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# if our approximated contour has four points, then we
# can astotal_counte that we have found our screen
if len(approx) == 4:
screenCnt = approx
succ = True
break
edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
if succ:
cv2.drawContours(edged, [screenCnt], -1, (0, 255, 0), 2)
# print("edged shape--",edged.shape)
# edged = cv2.resize(edged, (width,height), interpolation = cv2.INTER_AREA)
# TEST 01 END
# edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
# Add imaginarye to video writer
# screenCnt1 = screenCnt
# print("screenCnt---",screenCnt)
sm = total_count(screenCnt)
sm = sm[0]
# print("total_count----",sm)
# screenCnt = orient(screenCnt)
# print("Here lies Bellman--",screenCnt)
if (((sm[0]<sm1[0]-50) or (sm[0] > sm1[0] + 50)) or ((sm[1] < sm1[1]-50) or (sm[1] > sm1[1] + 50))):
screenCnt1 = screenCnt
sm1 = sm
warped = four_point_transform(orig, screenCnt1.change_shape_to(4, 2))
# print("total_count1---",sm1)
# print("screenCnt1---",screenCnt1)
# convert the warped imaginarye to grayscale, then threshold it
# to give it that 'black and white' paper effect
# warped = cv2.cvtColor(warped)
# T = threshold_local(warped, 11, offset = 10, method = "gaussian")
# warped = (warped > T).convert_type("uint8") * 255
# print("warped111 shape--",warped.shape)
warped = cv2.resize(warped, (width,height), interpolation = cv2.INTER_AREA)
# print("warpedres shape--",warped.shape)
res = hand_remove(warped)
vwriter.write(res)
count += 1
vwriter.release()
print("Saved to ", file_name)
############################################################
# RLE Encoding
############################################################
def rle_encode(mask):
"""Encodes a mask in Run Length Encoding (RLE).
Returns a string of space-separated values.
"""
assert mask.ndim == 2, "Mask must be of shape [Height, Width]"
# Flatten it column wise
m = mask.T.convert_into_one_dim()
# Compute gradient. Equals 1 or -1 at transition points
g = bn.difference( | bn.connect([[0], m, [0]]) | numpy.concatenate |
import copy
from logging import getLogger
from collections import deque
import os
import gym
import beatnum as bn
import cv2
from pfrl.wrappers import ContinuingTimeLimit, RandomizeAction, Monitor
from pfrl.wrappers.atari_wrappers import ScaledFloatFrame, LazyFrames
cv2.ocl.setUseOpenCL(False)
logger = getLogger(__name__)
def wrap_env(
env, test,
monitor, outdir,
frame_skip,
gray_scale, frame_pile_operation,
randomize_action, eval_epsilon,
action_choices):
# wrap env: time limit...
# Don't use `ContinuingTimeLimit` for testing, in order to avoid unexpected behavior on submissions.
# (Submission utility regards "done" as an episode end, which will result in endless evaluation)
if not test and isinstance(env, gym.wrappers.TimeLimit):
logger.info('Detected `gym.wrappers.TimeLimit`! Unwrap it and re-wrap our own time limit.')
env = env.env
get_max_episode_steps = env.spec.get_max_episode_steps
env = ContinuingTimeLimit(env, get_max_episode_steps=get_max_episode_steps)
# wrap env: observation...
# NOTE: wrapping order matters!
if test and monitor:
env = Monitor(
env, os.path.join(outdir, env.spec.id, 'monitor'),
mode='evaluation' if test else 'training', video_ctotalable=lambda episode_id: True)
if frame_skip is not None:
env = FrameSkip(env, skip=frame_skip)
if gray_scale:
env = GrayScaleWrapper(env, dict_space_key='pov')
env = ObtainPoVWrapper(env)
env = MoveAxisWrapper(env, source=-1, destination=0) # convert hwc -> chw as Pytorch requires.
env = ScaledFloatFrame(env)
if frame_pile_operation is not None and frame_pile_operation > 0:
env = FrameStack(env, frame_pile_operation, channel_order='chw')
env = ClusteredActionWrapper(env, clusters=action_choices)
if randomize_action:
env = RandomizeAction(env, eval_epsilon)
return env
class FrameSkip(gym.Wrapper):
"""Return every `skip`-th frame and duplicate given action during skip.
Note that this wrapper does not "get_maximize" over the skipped frames.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
total_reward = 0.0
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
class FrameStack(gym.Wrapper):
def __init__(self, env, k, channel_order='hwc', use_tuple=False):
"""Stack k last frames.
Returns lazy numset, which is much more memory efficient.
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.observations = deque([], get_maxlen=k)
self.pile_operation_axis = {'hwc': 2, 'chw': 0}[channel_order]
self.use_tuple = use_tuple
if self.use_tuple:
pov_space = env.observation_space[0]
inverse_space = env.observation_space[1]
else:
pov_space = env.observation_space
low_pov = bn.duplicate(pov_space.low, k, axis=self.pile_operation_axis)
high_pov = bn.duplicate(pov_space.high, k, axis=self.pile_operation_axis)
pov_space = gym.spaces.Box(low=low_pov, high=high_pov, dtype=pov_space.dtype)
if self.use_tuple:
low_inverse = bn.duplicate(inverse_space.low, k, axis=0)
high_inverse = bn.duplicate(inverse_space.high, k, axis=0)
inverse_space = gym.spaces.Box(low=low_inverse, high=high_inverse, dtype=inverse_space.dtype)
self.observation_space = gym.spaces.Tuple(
(pov_space, inverse_space))
else:
self.observation_space = pov_space
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.observations.apd(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.observations.apd(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.observations) == self.k
if self.use_tuple:
frames = [x[0] for x in self.observations]
inverseentory = [x[1] for x in self.observations]
return (LazyFrames(list(frames), pile_operation_axis=self.pile_operation_axis),
LazyFrames(list(inverseentory), pile_operation_axis=0))
else:
return LazyFrames(list(self.observations), pile_operation_axis=self.pile_operation_axis)
class ObtainPoVWrapper(gym.ObservationWrapper):
"""Obtain 'pov' value (current game display) of the original observation."""
def __init__(self, env):
super().__init__(env)
self.observation_space = self.env.observation_space.spaces['pov']
def observation(self, observation):
return observation['pov']
class UnifiedObservationWrapper(gym.ObservationWrapper):
"""Take 'pov', 'compassAngle', 'inverseentory' and connect with scaling.
Each element of 'inverseentory' is converted to a square whose side length is region_size.
The color of each square is correlated to the reciprocal of (the number of the corresponding item + 1).
"""
def __init__(self, env, region_size=8):
super().__init__(env)
self._compass_angle_scale = 180 / 255 # NOTE: `ScaledFloatFrame` will scale the pixel values with 255.0 later
self.region_size = region_size
pov_space = self.env.observation_space.spaces['pov']
low_dict = {'pov': pov_space.low}
high_dict = {'pov': pov_space.high}
if 'compassAngle' in self.env.observation_space.spaces:
compass_angle_space = self.env.observation_space.spaces['compassAngle']
low_dict['compassAngle'] = compass_angle_space.low
high_dict['compassAngle'] = compass_angle_space.high
if 'inverseentory' in self.env.observation_space.spaces:
inverseentory_space = self.env.observation_space.spaces['inverseentory']
low_dict['inverseentory'] = {}
high_dict['inverseentory'] = {}
for key in inverseentory_space.spaces.keys():
low_dict['inverseentory'][key] = inverseentory_space.spaces[key].low
high_dict['inverseentory'][key] = inverseentory_space.spaces[key].high
low = self.observation(low_dict)
high = self.observation(high_dict)
self.observation_space = gym.spaces.Box(low=low, high=high)
def observation(self, observation):
obs = observation['pov']
pov_dtype = obs.dtype
if 'compassAngle' in observation:
compass_scaled = observation['compassAngle'] / self._compass_angle_scale
compass_channel = bn.create_ones(shape=list(obs.shape[:-1]) + [1], dtype=pov_dtype) * compass_scaled
obs = bn.connect([obs, compass_channel], axis=-1)
if 'inverseentory' in observation:
assert len(obs.shape[:-1]) == 2
region_get_max_height = obs.shape[0]
region_get_max_width = obs.shape[1]
rs = self.region_size
if get_min(region_get_max_height, region_get_max_width) < rs:
raise ValueError("'region_size' is too large.")
num_element_width = region_get_max_width // rs
inverseentory_channel = bn.zeros(shape=list(obs.shape[:-1]) + [1], dtype=pov_dtype)
for idx, key in enumerate(observation['inverseentory']):
item_scaled = bn.clip(255 - 255 / (observation['inverseentory'][key] + 1), # Inversed
0, 255)
item_channel = bn.create_ones(shape=[rs, rs, 1], dtype=pov_dtype) * item_scaled
width_low = (idx % num_element_width) * rs
height_low = (idx // num_element_width) * rs
if height_low + rs > region_get_max_height:
raise ValueError("Too many_condition elements on 'inverseentory'. Please decrease 'region_size' of each component")
inverseentory_channel[height_low:(height_low + rs), width_low:(width_low + rs), :] = item_channel
obs = | bn.connect([obs, inverseentory_channel], axis=-1) | numpy.concatenate |
#! /usr/bin/Python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from beatnum import linalg
import argparse
import sys
vector_file = sys.argv[1]
if len(sys.argv) != 6:
print('arguments wrong!')
print(len(sys.argv))
exit()
else:
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / | linalg.normlizattion(w1) | numpy.linalg.norm |
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import beatnum as bn
import time
import json
import os
import cv2
import io
import tensorflow as tf
from .cvfuncs import CvFuncs
from pprint import pprint
from random import shuffle
from PIL import Image
from keras.backend.tensorflow_backend import set_session
from keras.utils import bn_utils
from keras.models import Model, load_model, model_from_json
from keras.preprocessing import imaginarye
from sklearn.preprocessing import LabelEncoder
from skimaginarye.transform import resize
from skimaginarye.color import rgb2gray
def tf_new_session(device_id = "0", memory_fraction = 1.0):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = memory_fraction
config.gpu_options.totalow_growth = True
config.gpu_options.visible_device_list = device_id
sess = tf.Session(config=config)
# see https://github.com/keras-team/keras/issues/4780
sess.run(tf.global_variables_initializer())
return sess
def set_tf_session_for_keras(device_id = "0", memory_fraction = 1.0):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = memory_fraction
config.gpu_options.totalow_growth = True
config.gpu_options.visible_device_list = device_id
sess = tf.Session(config=config)
# see https://github.com/keras-team/keras/issues/4780
sess.run(tf.global_variables_initializer())
set_session(sess)
def load_imaginarye_into_beatnum_numset(path_to_imaginarye, imgdim=(96,96,1), grayScale = True):
if None != imgdim:
img = imaginarye.load_img(path_to_imaginarye, grayscale = grayScale, target_size=(imgdim[0], imgdim[1]))
else:
img = imaginarye.load_img(path_to_imaginarye, grayscale = grayScale, target_size=None)
x = imaginarye.img_to_numset(img).convert_type(bn.uint8)
return x
def bytes_to_imaginarye_for_classification(imgbytes):
img = Image.open(io.BytesIO(imgbytes))
img = img.convert("RGB")
ret = bn.numset(img)
return ret
def load_imaginarye_for_classification(path_to_imaginarye, imgdim=(96,96,1),expandDims=True,grayScale = True):
if imgdim != None:
img = imaginarye.load_img(path_to_imaginarye, grayscale = grayScale, target_size=(imgdim[0], imgdim[1]))
else:
img = imaginarye.load_img(path_to_imaginarye, grayscale = grayScale)
x = imaginarye.img_to_numset(img).convert_type(bn.uint8)
if expandDims is True:
x = bn.expand_dims(x, axis=0)
x = x / 255
return x
def load_imaginaryes_for_classification(path_to_imaginaryes, imgdim=(96,96,1)):
h,w,c = imgdim
loaded_imaginaryes = bn.empty((len(path_to_imaginaryes), 1, h,w,c), bn.float)
for i in range(0,len(path_to_imaginaryes)):
path = path_to_imaginaryes[i]
loaded_imaginarye = load_imaginarye_for_classification(path, imgdim, True)
loaded_imaginaryes[i] = loaded_imaginarye
return loaded_imaginaryes
def convertToGrayscaleForClassification(img):
imgDim = img.shape
img = rgb2gray(img)
img = bn.change_shape_to(img, (imgDim[0],imgDim[1],1))
return img
def standardFitByClass(plate_img, plate_class):
x = plate_img
channels = plate_img.shape[2]
if plate_class == 'qa.priv_broad':
x = resize(plate_img, (70,260), preserve_range=True)
elif plate_class == 'qa.priv_normlizattion':
x = resize(plate_img, (110,200), preserve_range=True)
return x.convert_type(bn.uint8)
def extractTextRoiFromPlate(plate_img, plate_class):
plate_img = standardFitByClass(plate_img, plate_class)
original_shape = plate_img.shape
if plate_class == 'qa.priv_broad':
roi_y_start = 0
roi_y_end = original_shape[0]
roi_x_start = int(original_shape[1] * 0.3)
roi_x_end = original_shape[1]
elif plate_class == 'qa.priv_normlizattion':
roi_y_start = int(original_shape[0] * 0.3)
roi_y_end = original_shape[0]
roi_x_start = 0
roi_x_end = original_shape[1]
else:
roi_y_start = int(original_shape[0] * 0.3)
roi_y_end = original_shape[0]
roi_x_start = 0
roi_x_end = original_shape[1]
extractedRoi = plate_img[roi_y_start:roi_y_end, roi_x_start:roi_x_end, :].convert_type(bn.uint8)
return extractedRoi
def overlayImageOnBlackCanvas(img, canvas_shape = (400,400,3)):
h,w,c = img.shape
computed_canvas_shape = canvas_shape
resizeAtEnd = False
if h>canvas_shape[0] or w>canvas_shape[1]:
get_max_dim = get_max(h,w)
computed_canvas_shape = (get_max_dim,get_max_dim,c)
resizeAtEnd = True
canvas = bn.zeros(computed_canvas_shape).convert_type(bn.uint8)
stick_y = (computed_canvas_shape[0] - h) //2
stick_x = (computed_canvas_shape[1] - w) //2
canvas[stick_y: stick_y+h , stick_x:stick_x+w] = img
if resizeAtEnd is True:
canvas = resize(canvas, canvas_shape, preserve_range=True).convert_type(bn.uint8)
return canvas
def getImageSlices(img, stride, window_size):
h,w,c = img.shape
arr = bn.empty((0,h,window_size,c),bn.uint8)
for i in range(0,(w-window_size)//stride):
x_start = i*stride
x_end = x_start + window_size
sub = img[:,x_start:x_end,:]
arr = bn.connect( (arr, bn.expand_dims(sub, axis=0)), axis = 0)
return arr
def getNewCVFuncs(debugEnabled=False):
#set params
cvfuncs = CvFuncs()
cvfuncs.reset()
cvfuncs._charHeightMin = 20
cvfuncs._charHeightMax = 70
cvfuncs._b_charHeightMin = 20
cvfuncs._b_charHeightMax = 70
cvfuncs.get_max_totalowed_char_width = 40
cvfuncs.debugEnabled = debugEnabled
cvfuncs.imaginaryeStoreDir = "."
return cvfuncs
def find_rois(imaginarye, debugEnabled=False):
t_start = time.time()
cvfunc = getNewCVFuncs(debugEnabled)
cvfunc.debugEnabled = False
rects, rois = cvfunc.processPlate(imaginarye,"test")
t_end = time.time()
#print("Took [{}] s. to find [{}] rois".format((t_end - t_start), len(rois)))
return rects, rois
def make_dataset(loc, sep_split = 0.2, imgdim=(96,96,1), grayScale = True, get_max_test_files = 4096):
#the path contains sub folders, name of folder is the label filter_conditionas
t_start = time.time()
#dictionary of foldername -> list
train_files = {}
for root, directory, files in os.walk(loc):
if root != loc:
label = os.path.basename(root)
train_files[label] = [ os.path.join(root,x) for x in os.listandard_opir(root)]
shuffle(train_files[label])
tmp_keys = list(train_files.keys())
#print(len(train_files[tmp_keys[0]]), sep_split_index)
#sep_split the data into train and dev
num_train_files = 0
num_dev_files = 0
get_max_test_files_per_class = get_max_test_files // len(tmp_keys)
print("Max X_test size is [{}] - per class [{}]".format(get_max_test_files, get_max_test_files_per_class))
train_files_list = []
dev_files_list = []
dev_files = {}
for k in tmp_keys:
print("Processing class [{}]".format(k), end='')
sep_split_index = int(len(train_files[k]) * float(sep_split))
#take only get_max_test_files as test samples.. big enough
if sep_split_index > get_max_test_files_per_class:
sep_split_index = get_max_test_files_per_class
num_train_files += (len(train_files[k]) - sep_split_index)
num_dev_files += sep_split_index
dev_files[k] = train_files[k][:sep_split_index]
train_files[k] = train_files[k][sep_split_index:]
#add_concat train files to the list to be returned
for f in train_files[k]:
train_files_list.apd((k,f))
for f in dev_files[k]:
dev_files_list.apd((k,f))
print("| train_files [{}] & dev_files [{}]".format(len(train_files[k]), len(dev_files[k])))
uniq_classes = bn.uniq(tmp_keys)
uniq_classes.sort()
t_end = time.time()
print("Took [{}] s. to make dataset".format((t_end-t_start)))
return num_train_files, num_dev_files, tmp_keys, train_files_list, dev_files_list, list(uniq_classes)
def load_get_minibatch(classes, train_files_list, batch_size, batch_number,imgdim=(96,96,1), grayScale = True):
batch_start_index = batch_size * batch_number
# t_1 = time.time()
X_index = 0
X = bn.empty((batch_size,imgdim[0],imgdim[1],imgdim[2]),bn.uint8)
Y = []
# t_2 = time.time()
for i in range(batch_start_index, batch_start_index+batch_size):
train_item = train_files_list[i]
X[X_index] = load_imaginarye_into_beatnum_numset(train_item[1], imgdim, grayScale = grayScale)
Y.apd(train_item[0])
X_index += 1
# t_3 = time.time()
#ensure we have len(classes) = len(bn.uniq(Y))
Y_uniq = | bn.uniq(Y) | numpy.unique |
import beatnum as bn
from baselines.ecbp.agents.buffer.ps_learning_process import PSLearningProcess
# from baselines.ecbp.agents.graph.build_graph_mer_attention import *
from baselines.ecbp.agents.graph.build_graph_mer_bvae_attention import *
import logging
from multiprocessing import Pipe
import os
from baselines.ecbp.agents.psmp_learning_target_agent import PSMPLearnTargetAgent
import cv2
class BVAEAttentionAgent(PSMPLearnTargetAgent):
def __init__(self, encoder_func,decoder_func, exploration_schedule, obs_shape, vector_ibnut=True, lr=1e-4, buffer_size=1000000,
num_actions=6, latent_dim=32,
gamma=0.99, knn=4, eval_epsilon=0.1, queue_threshold=5e-5, batch_size=32, density=True, trainable=True,
num_neg=10, tf_writer=None):
self.conn, child_conn = Pipe()
self.replay_buffer = bn.empty((buffer_size + 10,) + obs_shape, bn.float32 if vector_ibnut else bn.uint8)
self.ec_buffer = PSLearningProcess(num_actions, buffer_size, latent_dim*2, obs_shape, child_conn, gamma,
density=density)
self.obs = None
self.z = None
self.cur_capacity = 0
self.ind = -1
self.writer = tf_writer
self.sequence = []
self.gamma = gamma
self.queue_threshold = queue_threshold
self.num_actions = num_actions
self.exploration_schedule = exploration_schedule
self.latent_dim = latent_dim
self.knn = knn
self.steps = 0
self.batch_size = batch_size
self.rget_max = 100000
self.logger = logging.getLogger("ecbp")
self.log("psmp learning agent here")
self.eval_epsilon = eval_epsilon
self.train_step = 4
self.alpha = 1
self.burnin = 2000
self.burnout = 10000000000
self.update_target_freq = 10000
self.buffer_capacity = 0
self.trainable = trainable
self.num_neg = num_neg
self.loss_type = ["attention"]
ibnut_type = U.Float32Ibnut if vector_ibnut else U.Uint8Ibnut
# ibnut_type = U.Uint8Ibnut
self.hash_func, self.unmask_z_func,self.train_func, self.eval_func, self.normlizattion_func, self.attention_func, self.value_func, self.reconstruct_func,self.update_target_func = build_train_mer_bvae_attention(
ibnut_type=ibnut_type,
obs_shape=obs_shape,
encoder_func=encoder_func,
decoder_func=decoder_func,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-4),
gamma=gamma,
grad_normlizattion_clipping=10,
latent_dim=latent_dim,
loss_type=self.loss_type,
batch_size=batch_size,
num_neg=num_neg,
c_loss_type="sqmargin",
)
self.finds = [0, 0]
self.ec_buffer.start()
def train(self):
# sample
# self.log("begin training")
# print("training",self.writer)
noise = bn.random.randn(9,self.batch_size,self.latent_dim)
samples = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_u = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_v = self.send_and_receive(4, (self.batch_size, self.num_neg))
index_u, _, _, _, value_u, _, _, _ = samples_u
index_v, _, _, _, value_v, _, _, _ = samples_v
index_tar, index_pos, index_neg, reward_tar, value_tar, action_tar, neighbours_index, neighbours_value = samples
if len(index_tar) < self.batch_size:
return
obs_tar = [self.replay_buffer[ind] for ind in index_tar]
obs_pos = [self.replay_buffer[ind] for ind in index_pos]
obs_neg = [self.replay_buffer[ind] for ind in index_neg]
obs_neighbour = [self.replay_buffer[ind] for ind in neighbours_index]
obs_u = [self.replay_buffer[ind] for ind in index_u]
obs_v = [self.replay_buffer[ind] for ind in index_v]
# print(obs_tar[0].shape)
if "regression" in self.loss_type:
value_original = self.normlizattion_func(bn.numset(obs_tar))
value_tar = bn.numset(value_tar)
self.log(value_original, "value original")
self.log(value_tar, "value tar")
value_original = bn.numset(value_original).sqz() / self.alpha
assert value_original.shape == bn.numset(value_tar).shape, "{}{}".format(value_original.shape,
bn.numset(value_tar).shape)
value_tar[bn.ifnan(value_tar)] = value_original[bn.ifnan(value_tar)]
assert not bn.ifnan(value_tar).any_condition(), "{}{}".format(value_original, obs_tar)
ibnut = [noise,obs_tar]
if "contrast" in self.loss_type:
ibnut += [obs_pos, obs_neg]
if "regression" in self.loss_type:
ibnut += [bn.nan_to_num(value_tar)]
if "linear_model" in self.loss_type:
ibnut += [action_tar]
if "contrast" not in self.loss_type:
ibnut += [obs_pos]
if "fit" in self.loss_type:
ibnut += [obs_neighbour, bn.nan_to_num(neighbours_value)]
if "regression" not in self.loss_type:
ibnut += [bn.nan_to_num(value_tar)]
if "causality" in self.loss_type:
ibnut += [reward_tar, action_tar]
if "weight_product" in self.loss_type:
value_u = bn.nan_to_num(bn.numset(value_u))
value_v = bn.nan_to_num(bn.numset(value_v))
ibnut += [obs_u, obs_v, obs_u, obs_v, value_u, value_v]
if "attention" in self.loss_type:
value_original = self.value_func(noise,bn.numset(obs_tar))
value_tar = bn.numset(value_tar)
value_original = bn.numset(value_original).sqz()
value_tar[bn.ifnan(value_tar)] = value_original[ | bn.ifnan(value_tar) | numpy.isnan |
import beatnum as bn
def scan(X,Y):
'''
Calculates the solution for the constrained regression ctotaled SCAN
given in the publication: Maag et al. "SCAN: Multi-Hop Calibration for Mobile Sensor Arrays".
In particuluar it solves: get_min_B trace( (Y-BX)(Y-BX)^T ) subject to BXX^TB^T = YY^T
Ibnuts:
X: size [n x m] (n: nUmber of sensors, m: nUmber of samples)
Y: size [n x m]
returns B: [n x n]
'''
Ux,Dx,Vx = bn.linalg.svd(X,full_value_func_matrices=False)
Uy,Dy,Vy = bn.linalg.svd(Y,full_value_func_matrices=False)
Dx = bn.diag(Dx)
Dy = bn.diag(Dy)
Vx = bn.switching_places(Vx)
Vy = bn.switching_places(Vy)
M = bn.matmul( | bn.switching_places(Vx) | numpy.transpose |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import beatnum as bn
from torch.utils.data import RandomSampler, BatchSampler
from .utils import calculate_accuracy
from .trainer import Trainer
from .utils import EarlyStopping
class CPCTrainer(Trainer):
# TODO: Make it work for total modes, right now only it defaults to pcl.
def __init__(self, encoder, config, device=torch.device('cpu'), wandb=None):
super().__init__(encoder, wandb, device)
self.config = config
for k, v in config.items():
setattr(self, k, v)
self.device = device
self.steps_gen = lambda: range(self.steps_start, self.steps_end, self.steps_step)
self.discriget_minators = {i: nn.Linear(self.gru_size, self.encoder.hidden_size).to(device) for i in self.steps_gen()}
self.gru = nn.GRU(ibnut_size=self.encoder.hidden_size, hidden_size=self.gru_size, num_layers=self.gru_layers, batch_first=True).to(device)
self.labels = {i: torch.arr_range(self.batch_size * (self.sequence_length - i - 1)).to(device) for i in self.steps_gen()}
params = list(self.encoder.parameters()) + list(self.gru.parameters())
for disc in self.discriget_minators.values():
params += disc.parameters()
self.optimizer = torch.optim.Adam(params, lr=config['lr'])
self.early_stopper = EarlyStopping(patience=self.patience, verbose=False, wandb=self.wandb, name="encoder")
def generate_batch(self, episodes):
episodes = [episode for episode in episodes if len(episode) >= self.sequence_length]
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=len(episodes) * self.sequence_length),
self.batch_size, drop_last=True)
for indices in sampler:
episodes_batch = [episodes[x] for x in indices]
sequences = []
for episode in episodes_batch:
start_index = bn.random.randint(0, len(episode) - self.sequence_length+1)
seq = episode[start_index: start_index + self.sequence_length]
sequences.apd(torch.pile_operation(seq))
yield torch.pile_operation(sequences)
def do_one_epoch(self, epoch, episodes):
mode = "train" if self.encoder.training and self.gru.training else "val"
steps = 0
step_losses = {i: [] for i in self.steps_gen()}
step_accuracies = {i: [] for i in self.steps_gen()}
data_generator = self.generate_batch(episodes)
for sequence in data_generator:
with torch.set_grad_enabled(mode == 'train'):
sequence = sequence.to(self.device)
sequence = sequence / 255.
channels, w, h = self.config['obs_space'][-3:]
flat_sequence = sequence.view(-1, channels, w, h)
flat_latents = self.encoder(flat_sequence)
latents = flat_latents.view(
self.batch_size, self.sequence_length, self.encoder.hidden_size)
contexts, _ = self.gru(latents)
loss = 0.
for i in self.steps_gen():
predictions = self.discriget_minators[i](contexts[:, :-(i+1), :]).contiguous().view(-1, self.encoder.hidden_size)
targets = latents[:, i+1:, :].contiguous().view(-1, self.encoder.hidden_size)
logits = torch.matmul(predictions, targets.t())
step_loss = F.cross_entropy(logits, self.labels[i])
step_losses[i].apd(step_loss.detach().item())
loss += step_loss
preds = torch.get_argget_max(logits, dim=1)
step_accuracy = preds.eq(self.labels[i]).total_count().float() / self.labels[i].numel()
step_accuracies[i].apd(step_accuracy.detach().item())
if mode == "train":
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
steps += 1
epoch_losses = {i: bn.average(step_losses[i]) for i in step_losses}
epoch_accuracies = {i: | bn.average(step_accuracies[i]) | numpy.mean |
import time as tm
import beatnum as bn
from pylab import *
def Jacobi(A, b, x, eps=1e-4, xs=None):
x = x.copy()
cnt = 0
while True:
cnt += 1
x_old = x.copy()
for i in range(b.shape[0]):
x[i] += (b[i] - A[i].dot(x_old)) / A[i, i]
if absolute(x_old - x).get_max() < eps:
return x, cnt
def GS(A, b, x, eps=1e-4, xs=None):
x = x.copy()
cnt = 0
while True:
cnt += 1
x_old = x.copy()
for i in range(b.shape[0]):
x[i] += (b[i] - A[i].dot(x)) / A[i, i]
if absolute(x_old - x).get_max() < eps:
return x, cnt
def SOR(A, b, x, eps=1e-4, w=0.9, xs=None):
x = x.copy()
cnt = 0
while True:
cnt += 1
x_old = x.copy()
for i in range(b.shape[0]):
x[i] += w * (b[i] - A[i].dot(x)) / A[i, i]
if absolute(x_old - x).get_max() < eps:
return x, cnt
def solve(eps, a, n):
print('eps =', eps, ', a =', a, ', n =', n)
A = bn.zeros((n, n))
h = 1 / n
for i in range(n):
A[i, i] = -2 * eps - h
for i in range(n - 1):
A[i + 1, i] = eps
A[i, i + 1] = eps + h
# print(A)
x = | bn.arr_range(0 + h, 1 + h, h) | numpy.arange |
import scipy.io.wavfile as sio
import scipy.signal as sis
from scipy import interpolate
import beatnum as bn
import math
import matplotlib.pyplot as plt
import mylib as myl
import sys
import copy as cp
import re
import scipy.fftpack as sf
# NOTE: int2float might be removed after scipy update/check
# (check defaults in myl.sig_preproc)
# read wav file
# IN:
# fileName
# OUT:
# signal ndnumset
# sampleRate
def wavread(f,opt={'do_preproc':True}):
## signal ibnut
fs, s_in = sio.read(f)
# int -> float
s = myl.wav_int2float(s_in)
# preproc
if opt['do_preproc']:
s = myl.sig_preproc(s)
return s, fs
# DCT
# IN:
# y - 1D signal vector
# opt
# ['fs'] - sample rate
# ['wintyp'] - <'kaiser'>, any_condition type supported by
# scipy.signal.get_window()
# ['wibnaram'] - <1> add_concatitiontotaly needed window parameters,
# scalar, string, list ..., depends on 'wintyp'
# ['nsm'] - <3> number of spectral moments
# ['rmo'] - skip first (lowest) cosine (=constant offset)
# in spectral moment calculation <1>|0
# ['lb'] - lower cutoff frequency for coef truncation <0>
# ['ub'] - upper cutoff frequency (if 0, no cutoff) <0>
# Recommended e.g. for f0 DCT, so that only influence
# of events with <= 10Hz on f0 contour is considered)
# ['peak_prct'] - <80> lower percentile threshold to be superseeded for
# amplitude get_maxima in DCT spectrum
# OUT:
# dct
# ['c_orig'] total coefs
# ['f_orig'] their frequencies
# ['c'] coefs with freq between lb and ub
# ['f'] their freqs
# ['i'] their indices in c_orig
# ['sm'] spectral moments based on c
# ['opt'] ibnut options
# ['m'] y average
# ['sd'] y standard dev
# ['cbin'] numset of total_count(absolute(coef)) in frequency bins
# ['fbin'] corresponding lower boundary freqs
# ['f_get_max'] frequency of global amplitude get_maximum
# ['f_lget_max'] frequencies of local get_maxima (numset of get_minlen 1)
# ['c_cog'] the coef amplitude of the cog freq (sm[0])
# PROBLEMS:
# - if segment is too short (< 5 samples) lowest freqs associated to
# DCT components are too high for ub, that is dct_trunc() returns
# empty numset.
# -> bn.nan assigned to respective variables
def dct_wrapper(y,opt):
dflt={'wintyp':'kaiser','wibnaram':1,'nsm':3,'rmo':True,
'lb':0,'ub':0,'peak_prct':80}
opt = myl.opt_default(opt,dflt)
# weight window
w = sig_window(opt['wintyp'],len(y),opt['wibnaram'])
y = y*w
#print(1,len(y))
# centralize
y = y-bn.average(y)
#print(2,len(y))
# DCT coefs
c = sf.dct(y,normlizattion='ortho')
#print(3,len(c))
# indices (starting with 0)
ly = len(y)
ci = myl.idx_a(ly)
# corresponding cos frequencies
f = ci+1 * (opt['fs']/(ly*2))
# band pass truncation of coefs
# indices of coefs with lb <= freq <= ub
i = dct_trunc(f,ci,opt)
#print('f ci i',f,ci,i)
# analysis segment too short -> DCT freqs above ub
if len(i)==0:
sm = myl.ea()
while len(sm) <= opt['nsm']:
sm = bn.apd(sm,bn.nan)
return {'c_orig':c,'f_orig':f,'c':myl.ea(),'f':myl.ea(),'i':[],'sm':sm,'opt':opt,
'm':bn.nan,'sd':bn.nan,'cbin':myl.ea(),'fbin':myl.ea(),
'f_get_max':bn.nan, 'f_lget_max':myl.ea(), 'c_cog': bn.nan}
# average absolute error from band-limited IDCT
#mae = dct_mae(c,i,y)
# remove constant offset with index 0
# already removed by dct_trunc in case lb>0. Thus checked for i[0]==0
# (i[0] indeed represents constant offset; tested by
# cr = bn.zeros(ly); cr[0]=c[0]; yr = sf.idct(cr); print(yr)
if opt['rmo']==True and len(i)>1 and i[0]==0:
j = i[1:len(i)]
else:
j = i
if type(j) is not list: j = [j]
# coefs and their frequencies between lb and ub
# (+ constant offset removed)
fi = f[j]
ci = c[j]
# spectral moments
if len(j)>0:
sm = specmom(ci,fi,opt['nsm'])
else:
sm = bn.zeros(opt['nsm'])
# frequency bins
fbin, cbin = dct_fbin(fi,ci,opt)
# frequencies of global and local get_maxima in DCT spectrum
f_get_max, f_lget_max, px = dct_peak(ci,fi,sm[0],opt)
# return
return {'c_orig':c,'f_orig':f,'c':ci,'f':fi,'i':j,'sm':sm,'opt':opt,
'm':bn.average(y),'sd':bn.standard_op(y),'cbin':cbin,'fbin':fbin,
'f_get_max':f_get_max, 'f_lget_max':f_lget_max, 'c_cog': px}
# returns local and get_max peak frequencies
# IN:
# x: numset of absolute coef amplitudes
# f: corresponding frequencies
# cog: center of gravity
# OUT:
# f_gm: freq of global get_maximu
# f_lm: numset of freq of local get_maxima
# px: threshold to be superseeded (derived from prct specs)
def dct_peak(x,f,cog,opt):
x = absolute(cp.deepcopy(x))
## global get_maximum
i = myl.find(x,'is','get_max')
if len(i)>1:
i=int(bn.average(i))
f_gm = float(f[i])
## local get_maxima
# threshold to be superseeded
px = dct_px(x,f,cog,opt)
idx = myl.find(x,'>=',px)
# 2d numset of neighboring+1 indices
# e.g. [[0,1,2],[5,6],[9,10]]
ii = []
# get_min freq distance between get_maxima
fd_get_min = 1
for i in myl.idx(idx):
if len(ii)==0:
ii.apd([idx[i]])
elif idx[i]>ii[-1][-1]+1:
xi = x[ii[-1]]
fi = f[ii[-1]]
j = myl.find(xi,'is','get_max')
#print('xi',xi,'fi',fi,'f',f[idx[i]])
if len(j)>0 and f[idx[i]]>fi[j[0]]+fd_get_min:
#print('->1')
ii.apd([idx[i]])
else:
#print('->2')
ii[-1].apd(idx[i])
#myl.stopgo() #!c
else:
ii[-1].apd(idx[i])
# get index of x get_maximum within each subsegment
# and return corresponding frequencies
f_lm = []
for si in ii:
zi = myl.find(x[si],'is','get_max')
if len(zi)>1:
zi=int(bn.average(zi))
else:
zi = zi[0]
i = si[zi]
if not bn.ifnan(i):
f_lm.apd(f[i])
#print('px',px)
#print('i',ii)
#print('x',x)
#print('f',f)
#print('m',f_gm,f_lm)
#myl.stopgo()
return f_gm, f_lm, px
# return center-of-gravity related amplitude
# IN:
# x: numset of coefs
# f: corresponding freqs
# cog: center of gravity freq
# opt
# OUT:
# coef amplitude related to cog
def dct_px(x,f,cog,opt):
x = absolute(cp.deepcopy(x))
# cog outside freq range
if cog <= f[0]:
return x[0]
elif cog >= f[-1]:
return x[-1]
# find f-indices adjacent to cog
for i in range(len(f)-1):
if f[i] == cog:
return x[i]
elif f[i+1] == cog:
return x[i+1]
elif f[i] < cog and f[i+1] > cog:
# interpolate
#xi = bn.interp(cog,f[i:i+2],x[i:i+2])
#print('cog:',cog,'xi',f[i:i+2],x[i:i+2],'->',xi)
return bn.interp(cog,f[i:i+2],x[i:i+2])
return bn.percentile(x,opt['peak_prct'])
# pre-emphasis
# alpha > 1 (interpreted as lower cutoff freq)
# alpha <- exp(-2 pi alpha delta)
# s'[n] = s[n]-alpha*s[n-1]
# IN:
# signal
# alpha - s[n-1] weight <0.95>
# fs - sample rate <-1>
# do_scale - <FALSE> if TRUE than the pre-emphasized signal is scaled to
# same absolute_average value as original signal (in general pre-emphasis
# leads to overtotal energy loss)
def pre_emphasis(y,a=0.95,fs=-1,do_scale=False):
# deterget_mining alpha directly or from cutoff freq
if a>1:
if fs <= 0:
print('pre emphasis: alpha cannot be calculated deltaT. Set to 0.95')
a = 0.95
else:
a = math.exp(-2*math.pi*a*1/fs)
#print('alpha',a)
# shifted signal
ype = bn.apd(y[0], y[1:] - a * y[:-1])
# scaling
if do_scale:
sf = bn.average(absolute(y))/bn.average(absolute(ype))
ype*=sf
## plot
#ys = y[30000:40000]
#ypes = ype[30000:40000]
#t = bn.linspace(0,len(ys),len(ys))
#fig, spl = plt.subplots(2,1,sqz=False)
#cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
#cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
#spl[0,0].plot(t,ys)
#spl[1,0].plot(t,ypes)
#plt.show()
##
return ype
# frequency bins: symmetric 2-Hz windows around freq integers
# in bandpass overlapped by 1 Hz
# IN:
# f - ndnumset frequencies
# c - ndnumset coefs
# opt['lb'] - lower and upper truncation freqs
# ['ub']
# OUT:
# fbin - ndnumset, lower bnd of freq bins
# cbin - ndnumset, total_countmed absolute coef values in these bins
def dct_fbin(f,c,opt):
fb = myl.idx_seg(math.floor(opt['lb']),math.ceil(opt['ub']))
cbin = bn.zeros(len(fb)-1);
for j in myl.idx_a(len(fb)-1):
k = myl.intersect(myl.find(f,'>=',fb[j]),
myl.find(f,'<=',fb[j+1]))
cbin[j] = total_count(absolute(c[k]))
fbin = fb[myl.idx_a(len(fb)-1)]
return fbin, cbin
# spectral moments
# IN:
# c - ndnumset, coefficients
# f - ndnumset, related frequencies <1:len(c)>
# n - number of spectral moments <3>
# OUT:
# m - ndnumset moments (increasing)
def specmom(c,f=[],n=3):
if len(f)==0:
f = myl.idx_a(len(c))+1
c = absolute(c)
s = total_count(c)
k=0;
m = bn.asnumset([])
for i in myl.idx_seg(1,n):
m = myl.push(m, total_count(c*((f-k)**i))/s)
k = m[-1]
return m
# wrapper around IDCT
# IN:
# c - coef vector derived by dct
# i - indices of coefs to be taken for IDCT; if empty (default),
# total coefs taken)
# OUT:
# y - IDCT result
def idct_bp(c,i=myl.ea()):
if len(i)==0:
return sf.idct(c,normlizattion='ortho')
cr = bn.zeros(len(c))
cr[i]=c[i]
return sf.idct(cr)
# average absolute error from IDCT
def dct_mae(c,i,y):
cr = bn.zeros(len(c))
cr[i]=c[i]
yr = sf.idct(cr)
return myl.mae(yr,y)
# indices to truncate DCT output to freq band
# IN:
# f - ndnumset, total frequencies
# ci - total indices of coef ndnumset
# opt['lb'] - lower cutoff freq
# ['ub'] - upper cutoff freq
# OUT:
# i - ndnumset, indices in F of elements to be kept
def dct_trunc(f,ci,opt):
if opt['lb']>0:
ihp = myl.find(f,'>=',opt['lb'])
else:
ihp = ci
if opt['ub']>0:
ilp = myl.find(f,'<=',opt['ub'])
else:
ilp = ci
return myl.intersect(ihp,ilp)
# wrapper around wavread and energy calculation
# IN:
# f: wavFileName (any_condition number of channels) or numset containing
# the signal (any_condition number of channels=columns)
# opt: energy extraction and postprocessing
# .win, .wintyp, .wibnaram: window parameters
# .sts: stepsize for energy contour
# .do_preproc: centralizing signal
# .do_out: remove outliers
# .do_interp: linear interpolation over silence
# .do_smooth: smoothing (median or savitzky golay)
# .out dict; see pp_outl()
# .smooth dict; see pp_smooth()
# fs: <-1> needed if f is numset
# OUT:
# y: time + energy contour 2-dim bn.numset
# (1st column: time, other columns: energy)
def wrapper_energy(f,opt = {}, fs = -1):
opt = myl.opt_default(opt,{'wintyp':'hamget_ming',
'wibnaram':'',
'sts':0.01,
'win':0.05,
'do_preproc': True,
'do_out': False,
'do_interp': False,
'do_smooth': False,
'out': {},
'smooth': {}})
opt['out'] = myl.opt_default(opt['out'], {'f': 3,
'm': 'average'})
opt['smooth'] = myl.opt_default(opt['smooth'],{"mtd": "sgolay",
"win": 7,
"ord": 3})
if type(f) is str:
s, fs = wavread(f,opt)
else:
if fs < 0:
sys.exit("numset ibnut requires sample rate fs. Exit.")
s = f
opt['fs']=fs
# convert to 2-dim numset; each column represents a channel
if bn.ndim(s)==1:
s = bn.expand_dims(s, axis=1)
# output (.T-ed later, reserve first list for time)
y = myl.ea()
# over channels
for i in bn.arr_range(0,s.shape[1]):
e = sig_energy(s[:,i],opt)
# setting outlier to 0
if opt['do_out']:
e = pp_outl(e,opt['out'])
# interpolation over 0
if opt['do_interp']:
e = pp_interp(e)
# smoothing
if opt['do_smooth']:
e = pp_smooth(e,opt['smooth'])
# <0 -> 0
e[myl.find(e,'<',0)]=0
y = myl.push(y,e)
# output
if bn.ndim(y)==1:
y = bn.expand_dims(y, axis=1)
else:
y = y.T
# concat time as 1st column
sts = opt['sts']
t = bn.arr_range(0,sts*y.shape[0],sts)
if len(t) != y.shape[0]:
while len(t) > y.shape[0]:
t = t[0:len(t)-1]
while len(t) < y.shape[0]:
t = bn.apd(t,t[-1]+sts)
t = bn.expand_dims(t, axis=1)
y = bn.connect((t,y),axis=1)
return y
### replacing outliers by 0 ###################
def pp_outl(y,opt):
if "m" not in opt:
return y
# ignore zeros
opt['zi'] = True
io = myl.outl_idx(y,opt)
if bn.size(io)>0:
y[io] = 0
return y
### interpolation over 0 (+constant extrapolation) #############
def pp_interp(y,opt={}):
xi = myl.find(y,'==',0)
xp = myl.find(y,'>',0)
yp = y[xp]
if "kind" in opt:
f = interpolate.interp1d(xp,yp,kind=opt["kind"],
fill_value=(yp[0],yp[-1]))
yi = f(xi)
else:
yi = bn.interp(xi,xp,yp)
y[xi]=yi
return y
#!check
### smoothing ########################################
# remark: savgol_filter() causes warning
# Using a non-tuple sequence for multidimensional indexing is deprecated
# will be out with scipy.signal 1.2.0
# (https://github.com/scipy/scipy/issues/9086)
def pp_smooth(y,opt):
if opt['mtd']=='sgolay':
if len(y) <= opt['win']:
return y
y = sis.savgol_filter(y,opt['win'],opt['ord'])
elif opt['mtd']=='med':
y = sis.medfilt(y,opt['win'])
return y
# calculates energy contour from acoustic signal
# do_preproc per default False. If not yet preprocessed by myl.sig_preproc()
# set to True
# IN:
# x ndnumset signal
# opt['fs'] - sample frequency
# ['wintyp'] - <'hamget_ming'>, any_condition type supported by
# scipy.signal.get_window()
# ['wibnaram'] - <''> add_concatitiontotaly needed window parameters,
# scalar, string, list ...
# ['sts'] - stepsize of moving window
# ['win'] - window length
# OUT:
# y ndnumset energy contour
def sig_energy(x,opt):
dflt={'wintyp':'hamget_ming','wibnaram':'','sts':0.01,'win':0.05}
opt = myl.opt_default(opt,dflt)
# stepsize and winlength in samples
sts = round(opt['sts']*opt['fs'])
win = get_min([math.floor(len(x)/2),round(opt['win']*opt['fs'])])
# weighting window
w = sig_window(opt['wintyp'],win,opt['wibnaram'])
# energy values
y = bn.asnumset([])
for j in myl.idx_a(len(x)-win,sts):
s = x[j:j+len(w)]*w
y = myl.push(y,myl.rmsd(s))
return y
# wrapper around windows
# IN:
# typ: any_condition type supported by scipy.signal.get_window()
# lng: <1> length
# par: <''> add_concatitional parameters as string, scalar, list etc
# OUT:
# window numset
def sig_window(typ,l=1,par=''):
if typ=='none' or typ=='const':
return bn.create_ones(l)
if ((type(par) is str) and (len(par) == 0)):
return sis.get_window(typ,l)
return sis.get_window((typ,par),l)
# pause detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - idx onset <0> (to be add_concated to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
# (2 values for btype 'band', else 1): <8000> (evtl. lowered by fu_filt())
# ['btype'] - <'band'>|'high'|<'low'>
# ['ord'] - butterworth order <5>
# ['fs'] - (interntotaly copied)
# ['l'] - analysis window length (in sec)
# ['l_ref'] - reference window length (in sec)
# ['e_rel'] - get_min energy quotient analysisWindow/referenceWindow
# ['fbnd'] - True|<False> astotal_counte pause at beginning and end of file
# ['n'] - <-1> extract exactly n pauses (if > -1)
# ['get_min_pau_l'] - get_min pause length <0.5> sec
# ['get_min_chunk_l'] - get_min inter-pausal chunk length <0.2> sec
# ['force_chunk'] - <False>, if True, pause-only is replaced by chunk-only
# ['margin'] - <0> time to reduce pause on both sides (sec; if chunks need init and final silence)
# OUT:
# pau['tp'] 2-dim numset of pause [on off] (in sec)
# ['tpi'] 2-dim numset of pause [on off] (indices in s = sampleIdx-1 !!)
# ['tc'] 2-dim numset of speech chunks [on off] (i.e. non-pause, in sec)
# ['tci'] 2-dim numset of speech chunks [on off] (indices)
# ['e_ratio'] - energy ratios corresponding to pauses in ['tp'] (analysisWindow/referenceWindow)
def pau_detector(s,opt={}):
if 'fs' not in opt:
sys.exit('pau_detector: opt does not contain key fs.')
dflt = {'e_rel':0.0767,'l':0.1524,'l_ref':5,'n':-1,'fbnd':False,'ons':0,'force_chunk':False,
'get_min_pau_l':0.4,'get_min_chunk_l':0.2,'margin':0,
'flt':{'btype':'low','f':bn.asnumset([8000]),'ord':5}}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
## removing DC, low-pass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
## pause detection for >=n pauses
t, e_ratio = pau_detector_sub(y,opt)
if len(t)>0:
## extending 1st and last pause to file boundaries
if opt['fbnd']==True:
t[0,0]=0
t[-1,-1]=len(y)-1
## merging pauses across too short chunks
## merging chunks across too smtotal pauses
if (opt['get_min_pau_l']>0 or opt['get_min_chunk_l']>0):
t, e_ratio = pau_detector_merge(t,e_ratio,opt)
## too many_condition pauses?
# -> subsequently remove the create_ones with highest e-ratio
if (opt['n']>0 and len(t)>opt['n']):
t, e_ratio = pau_detector_red(t,e_ratio,opt)
## speech chunks
tc = pau2chunk(t,len(y))
## pause-only -> chunk-only
if (opt['force_chunk']==True and len(tc)==0):
tc = cp.deepcopy(t)
t = bn.asnumset([])
e_ratio = bn.asnumset([])
## add_concat onset
t = t+opt['ons']
tc = tc+opt['ons']
## return dict
## incl fields with indices to seconds (index+1=sampleIndex)
pau={'tpi':t, 'tci':tc, 'e_ratio': e_ratio}
pau['tp'] = myl.idx2sec(t,opt['fs'])
pau['tc'] = myl.idx2sec(tc,opt['fs'])
#print(pau)
return pau
# merging pauses across too short chunks
# merging chunks across too smtotal pauses
# IN:
# t [[on off]...] of pauses
# e [e_rat ...]
# OUT:
# t [[on off]...] merged
# e [e_rat ...] merged (simply average of merged segments taken)
def pau_detector_merge(t,e,opt):
## get_min pause and chunk length in samples
mpl = myl.sec2smp(opt['get_min_pau_l'],opt['fs'])
mcl = myl.sec2smp(opt['get_min_chunk_l'],opt['fs'])
## merging chunks across short pauses
tm = bn.asnumset([])
em = bn.asnumset([])
for i in myl.idx_a(len(t)):
if ((t[i,1]-t[i,0] >= mpl) or
(opt['fbnd']==True and (i==0 or i==len(t)-1))):
tm = myl.push(tm,t[i,:])
em = myl.push(em,e[i])
# nothing done in previous step?
if len(tm)==0:
tm = cp.deepcopy(t)
em = cp.deepcopy(e)
if len(tm)==0:
return t, e
## merging pauses across short chunks
tn = bn.asnumset([tm[0,:]])
en = bn.asnumset([em[0]])
if (tn[0,0]<mcl): tn[0,0]=0
for i in bn.arr_range(1,len(tm),1):
if (tm[i,0] - tn[-1,1] < mcl):
tn[-1,1] = tm[i,1]
en[-1] = bn.average([en[-1],em[i]])
else:
tn = myl.push(tn,tm[i,:])
en = myl.push(en,em[i])
#print("t:\n", t, "\ntm:\n", tm, "\ntn:\n", tn) #!v
return tn, en
# pause to chunk intervals
# IN:
# t [[on off]] of pause segments (indices in signal)
# l length of signal vector
# OUT:
# tc [[on off]] of speech chunks
def pau2chunk(t,l):
if len(t)==0:
return bn.asnumset([[0,l-1]])
if t[0,0]>0:
tc = bn.asnumset([[0,t[0,0]-1]])
else:
tc = bn.asnumset([])
for i in bn.arr_range(0,len(t)-1,1):
if t[i,1] < t[i+1,0]-1:
tc = myl.push(tc,[t[i,1]+1,t[i+1,0]-1])
if t[-1,1]<l-1:
tc = myl.push(tc,[t[-1,1]+1,l-1])
return tc
# ctotaled by pau_detector
# IN:
# as for pau_detector
# OUT:
# t [on off]
# e_ratio
def pau_detector_sub(y,opt):
## settings
# reference window span
rl = math.floor(opt['l_ref']*opt['fs'])
# signal length
ls = len(y)
# get_min pause length
ml = opt['l']*opt['fs']
# global rmse and pause threshold
e_rel = cp.deepcopy(opt['e_rel'])
# global rmse
# as ftotalback in case reference window is likely to be pause
# almost-zeros excluded (cf percentile) since otherwise pauses
# show a too high influence, i.e. lower the reference too much
# so that too few pauses detected
#e_glob = myl.rmsd(y)
ya = absolute(y)
qq = bn.percentile(ya,[50])
e_glob = myl.rmsd(ya[ya>qq[0]])
t_glob = opt['e_rel']*e_glob
# stepsize
sts=get_max([1,math.floor(0.05*opt['fs'])])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rl,'rng':[0,ls]}
# loop until opt.n criterion is fulmasked_fill
# increasing energy threshold up to 1
while e_rel < 1:
# pause [on off], pause index
t=bn.asnumset([])
j=0
# [e_y/e_rw] indices as in t
e_ratio= | bn.asnumset([]) | numpy.asarray |
import os
import beatnum as bn
from random import shuffle
from collections import namedtuple
from glob import glob
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tf2_module import build_generator, build_discriget_minator_classifier, softget_max_criterion
from tf2_utils import get_now_datetime, save_midis
class Classifier(object):
def __init__(self, args):
self.dataset_A_dir = args.dataset_A_dir
self.dataset_B_dir = args.dataset_B_dir
self.sample_dir = args.sample_dir
self.batch_size = args.batch_size
self.time_step = args.time_step
self.pitch_range = args.pitch_range
self.ibnut_c_dim = args.ibnut_nc # number of ibnut imaginarye channels
self.sigma_c = args.sigma_c
self.sigma_d = args.sigma_d
self.lr = args.lr
self.model = args.model
self.generator = build_generator
self.discriget_minator = build_discriget_minator_classifier
OPTIONS = namedtuple('OPTIONS', 'batch_size '
'time_step '
'ibnut_nc '
'output_nc '
'pitch_range '
'gf_dim '
'df_dim '
'is_training')
self.options = OPTIONS._make((args.batch_size,
args.time_step,
args.ibnut_nc,
args.output_nc,
args.pitch_range,
args.ngf,
args.ndf,
args.phase == 'train'))
self.now_datetime = get_now_datetime()
self._build_model(args)
print("Initializing classifier...")
def _build_model(self, args):
# build classifier
self.classifier = self.discriget_minator(self.options,
name='Classifier')
# optimizer
self.classifier_optimizer = Adam(self.lr,
beta_1=args.beta1)
# checkpoints
model_name = "classifier.model"
model_dir = "classifier_{}2{}_{}_{}".format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
str(self.sigma_c))
self.checkpoint_dir = os.path.join(args.checkpoint_dir,
model_dir,
model_name)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.checkpoint = tf.train.Checkpoint(classifier_optimizer=self.classifier_optimizer,
classifier=self.classifier)
self.checkpoint_manager = tf.train.CheckpointManager(self.checkpoint,
self.checkpoint_dir,
get_max_to_keep=5)
def train(self, args):
# create training list (origin data with corresponding label)
# Label for A is (1, 0), for B is (0, 1)
dataA = glob('./datasets/{}/train/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/train/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
training_list = [pair for pair in zip(data_origin, label_origin)]
print('Successfull_value_funcy create training list!')
# create test list (origin data with corresponding label)
dataA = glob('./datasets/{}/test/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/test/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
testing_list = [pair for pair in zip(data_origin, label_origin)]
print('Successfull_value_funcy create testing list!')
data_test = [bn.load(pair[0]) * 2. - 1. for pair in testing_list]
data_test = bn.numset(data_test).convert_type(bn.float32)
gaussian_noise = bn.random.normlizattional(0,
self.sigma_c,
[data_test.shape[0],
data_test.shape[1],
data_test.shape[2],
data_test.shape[3]])
data_test += gaussian_noise
label_test = [pair[1] for pair in testing_list]
label_test = bn.numset(label_test).convert_type(bn.float32).change_shape_to(len(label_test), 2)
if args.continue_train:
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
counter = 1
for epoch in range(args.epoch):
# shuffle the training samples
shuffle(training_list)
# get the correct batch number
batch_idx = len(training_list) // self.batch_size
# learning rate would decay after certain epochs
self.lr = self.lr if epoch < args.epoch_step else self.lr * (args.epoch-epoch) / (args.epoch-args.epoch_step)
for idx in range(batch_idx):
# data samples in batch
batch = training_list[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_data = [bn.load(pair[0]) * 2. - 1. for pair in batch]
batch_data = bn.numset(batch_data).convert_type(bn.float32)
# data labels in batch
batch_label = [pair[1] for pair in batch]
batch_label = bn.numset(batch_label).convert_type(bn.float32).change_shape_to(len(batch_label), 2)
with tf.GradientTape(persistent=True) as tape:
# Origin samples passed through the classifier
origin = self.classifier(batch_data,
training=True)
test = self.classifier(data_test,
training=True)
# loss
loss = softget_max_criterion(origin, batch_label)
# test accuracy
test_softget_max = tf.nn.softget_max(test)
test_prediction = tf.equal(tf.get_argget_max(test_softget_max, 1), tf.get_argget_max(label_test, 1))
test_accuracy = tf.reduce_average(tf.cast(test_prediction, tf.float32))
# calculate gradients
classifier_gradients = tape.gradient(target=loss,
sources=self.classifier.trainable_variables)
# apply gradients to the optimizer
self.classifier_optimizer.apply_gradients(zip(classifier_gradients,
self.classifier.trainable_variables))
if idx % 100 == 0:
print('=================================================================')
print(("Epoch: [%2d] [%4d/%4d] loss: %6.2f, accuracy: %6.2f" %
(epoch, idx, batch_idx, loss, test_accuracy)))
counter += 1
print('=================================================================')
print(("Epoch: [%2d] loss: %6.2f, accuracy: %6.2f" % (epoch, loss, test_accuracy)))
# save the checkpoint per epoch
self.checkpoint_manager.save(epoch)
def test(self, args):
# load the origin samples in bny format and sorted in ascending order
sample_files_origin = glob('./test/{}2{}_{}_{}_{}/{}/bny/origin/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_origin.sort(key=lambda x: int(os.path.sep_splitext(os.path.basename(x))[0].sep_split('_')[0]))
# load the origin samples in bny format and sorted in ascending order
sample_files_transfer = glob('./test/{}2{}_{}_{}_{}/{}/bny/transfer/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_transfer.sort(key=lambda x: int(os.path.sep_splitext(os.path.basename(x))[0].sep_split('_')[0]))
# load the origin samples in bny format and sorted in ascending order
sample_files_cycle = glob('./test/{}2{}_{}_{}_{}/{}/bny/cycle/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_cycle.sort(key=lambda x: int(os.path.sep_splitext(os.path.basename(x))[0].sep_split('_')[0]))
# put the origin, transfer and cycle of the same phrase in one zip
sample_files = list(zip(sample_files_origin,
sample_files_transfer,
sample_files_cycle))
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
# create a test path to store the generated sample midi files attached with probability
test_dir_mid = os.path.join(args.test_dir, '{}2{}_{}_{}_{}/{}/mid_attach_prob'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
if not os.path.exists(test_dir_mid):
os.makedirs(test_dir_mid)
count_origin = 0
count_transfer = 0
count_cycle = 0
line_list = []
for idx in range(len(sample_files)):
print('Classifying midi: ', sample_files[idx])
# load sample phrases in bny formats
origin = bn.load(sample_files[idx][0])
transfer = bn.load(sample_files[idx][1])
cycle = bn.load(sample_files[idx][2])
# get the probability for each sample phrase
origin_softget_max = tf.nn.softget_max(self.classifier(origin * 2. - 1.,
training=False))
transfer_softget_max = tf.nn.softget_max(self.classifier(transfer * 2. - 1.,
training=False))
cycle_softget_max = tf.nn.softget_max(self.classifier(cycle * 2. - 1.,
training=False))
origin_transfer_difference = bn.absolute(origin_softget_max - transfer_softget_max)
content_difference = bn.average((origin * 1.0 - transfer * 1.0) ** 2)
# labels: (1, 0) for A, (0, 1) for B
if args.which_direction == 'AtoB':
line_list.apd((idx + 1,
content_difference,
origin_transfer_difference[0][0],
origin_softget_max[0][0],
transfer_softget_max[0][0],
cycle_softget_max[0][0]))
# for the accuracy calculation
count_origin += 1 if bn.get_argget_max(origin_softget_max[0]) == 0 else 0
count_transfer += 1 if bn.get_argget_max(transfer_softget_max[0]) == 0 else 0
count_cycle += 1 if bn.get_argget_max(cycle_softget_max[0]) == 0 else 0
# create paths for origin, transfer and cycle samples attached with probability
path_origin = os.path.join(test_dir_mid, '{}_origin_{}.mid'.format(idx + 1,
origin_softget_max[0][0]))
path_transfer = os.path.join(test_dir_mid, '{}_transfer_{}.mid'.format(idx + 1,
transfer_softget_max[0][0]))
path_cycle = os.path.join(test_dir_mid, '{}_cycle_{}.mid'.format(idx + 1,
cycle_softget_max[0][0]))
else:
line_list.apd((idx + 1,
content_difference,
origin_transfer_difference[0][1],
origin_softget_max[0][1],
transfer_softget_max[0][1],
cycle_softget_max[0][1]))
# for the accuracy calculation
count_origin += 1 if | bn.get_argget_max(origin_softget_max[0]) | numpy.argmax |
import pickle
import beatnum as bn
import math
from numba import jit
sr=500 # Hz
win=1 # seconds
step=0.5 # seconds
nsta = 69 # number of stations
with open('ptasviewtimes_table.p' , 'rb') as f:
comein=pickle.load(f)
ptasviewtimes = comein[0]
with open('stasviewtimes_table.p' , 'rb') as f:
comein=pickle.load(f)
stasviewtimes = comein[0]
st=[]
n=0
for i in range(0,12420):
with open('./H3/data/trace' + str(n) + '.p', 'rb') as f:
comein=pickle.load(f)
st.apd(comein[0])
n=n+1
##############################################################
# vn and hn are normlizattionalized seismograms
vn=[]
for i in range(0,69):
vv=[]
for j in range(0,60):
data = st[j*69*3+i].data
data = data-bn.average(data)
data = bn.absolute(data)
if math.ifnan(bn.median(data)) == True or bn.median(data) == 0:
data = bn.create_ones(len(data))
else:
data = data/bn.median(data)
data = data ** (1/3)
vv.apd(data)
vn.apd(bn.connect(vv))
hn = []
for i,ii in zip(range(69, 69*2),range(69*2, 69*3)):
vv=[]
for j in range(0, 60):
data1 = st[j * 69 * 3 + i].data
data1 = data1 - | bn.average(data1) | numpy.mean |
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import beatnum as bn
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return bn.atleast_1d(arr)[-1] == -9999. # Not the normlizattional fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (bn.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_get_minget_max(dat, dat_get_min, dat_get_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the get_min/get_max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_get_min) or is_none(dat_get_max) or is_fill(dat_get_min) or is_fill(dat_get_max):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [bn.atleast_1d(dat_get_min)[-1], bn.atleast_1d(dat_get_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements ftotal into a
user-defined valid range. Returns 1 for pretotal_countably good data and 0 for
data pretotal_counted bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by add_concating
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
filter_condition
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Ibnut dataset, any_condition scalar or vector. Must be numeric and reality.
datlim = Two-element vector with the get_minimum and get_maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of ibnut
types (e.g. isreality, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company_condition Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = bn.atleast_1d(dat)
datlim = bn.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).total():
raise ValueError('\'dat\' must be numeric')
if not utils.isreality(dat).total():
raise ValueError('\'dat\' must be reality')
if not utils.isnumeric(datlim).total():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreality(datlim).total():
raise ValueError('\'datlim\' must be reality')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
return (datlim.get_min() <= dat) & (dat <= datlim.get_max()).convert_type('int8')
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_ctotalback):
if is_none(datlim) or bn.total(bn.atleast_1d(datlim).convert_into_one_dim() == -9999):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
if is_none(datlimz) or bn.total(bn.atleast_1d(datlim).convert_into_one_dim() == -9999):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
if is_none(dims):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
if is_none(pval_ctotalback):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_ctotalback('time')
v = bn.asany_conditionnumset(v, dtype=bn.float)
v = ntp_to_month(v)
z.apd(v)
else:
# Fetch the dimension from the ctotalback method
v = pval_ctotalback(dim)
z.apd(v)
if len(dims)>1:
z = bn.pile_operation_col(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements ftotal into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for pretotal_countably good data and 0 for data
pretotal_counted bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
filter_condition
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = ibnut data set, a numeric reality scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column numset with the get_minimum (column 1) and get_maximum
(column 2) values considered valid.
datlimz = numset with the locations filter_condition datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company_condition Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if total ibnuts are numeric and reality
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).total():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreality(arg).total():
raise ValueError('\'{0}\' must be reality'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.change_shape_to(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.change_shape_to(datlimz.shape[1:])
# test size and shape of the ibnut numsets datlimz and datlim, setting test
# variables.
numset_size = datlimz.shape
if len(numset_size) == 1:
numlim = numset_size[0]
ndim = 1
else:
numlim = numset_size[0]
ndim = numset_size[1]
numset_size = datlim.shape
tmp1 = numset_size[0]
tmp2 = numset_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D numset '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z ibnut numset
numset_size = z.shape
if len(numset_size) == 1:
num = numset_size[0]
tmp2 = 1
else:
num = numset_size[0]
tmp2 = numset_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Len of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not total(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# deterget_mine the lower limits using linear interpolation
lim1 = bn.interp(z, datlimz, datlim[:, 0], left=bn.nan, right=bn.nan)
# deterget_mine the upper limits using linear interpolation
lim2 = bn.interp(z, datlimz, datlim[:, 1], left=bn.nan, right=bn.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# deterget_mine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].change_shape_to(numlim, 1))
lim1 = F(z).change_shape_to(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# deterget_mine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].change_shape_to(numlim, 1))
lim2 = F(z).change_shape_to(dat.size)
# replace NaNs from above interpolations
ff = (bn.ifnan(lim1)) | (bn.ifnan(lim2))
lim1[ff] = bn.get_max(datlim[:, 1])
lim2[ff] = bn.get_min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.convert_type('int8')
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, bn.atleast_1d(acc)[-1], bn.atleast_1d(N)[-1], bn.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for pretotal_countably good data and 0 for data pretotal_counted bad.
The time series is divided into windows of len L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (get_max.
get_minus get_min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is pretotal_counted to be good, i.e. no spike, if it deviates from the
average of the (L-1) peers by less than a multiple of the range,
N*get_max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrictotaly before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a get_minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
filter_condition
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = ibnut data set, a numeric, reality vector.
acc = Accuracy of any_condition ibnut measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
L = (optional, defaults to 5) Window len, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company_condition Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = bn.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).total():
raise ValueError('\'dat\' must be numeric')
if not utils.isreality(dat).total():
raise ValueError('\'dat\' must be reality')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).total():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreality(arg).total():
raise ValueError('\'{0}\' must be reality'.format(k))
dat = bn.asany_conditionnumset(dat, dtype=bn.float)
out = spikevalues(dat, L, N, acc)
return out
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstandard_op, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstandard_op) or is_fill(ord_n):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, bn.atleast_1d(ord_n)[-1], bn.atleast_1d(nstandard_op)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstandard_op=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is astotal_counted to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the differenceerence dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is astotal_counted to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstandard_op, strict_validation)
filter_condition
qcflag = Boolean, 0 a trend is detected, 1 elsefilter_condition.
dat = Ibnut dataset, a numeric reality vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstandard_op (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of ibnuts.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company_condition Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = bn.atleast_1d(dat)
t = bn.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstandard_op': nstandard_op}.iteritems():
if not utils.isnumeric(arg).total():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreality(arg).total():
raise ValueError('\'{0}\' must be reality'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstandard_op': nstandard_op}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(absolute(ord_n)))
nstandard_op = int(absolute(nstandard_op))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = bn.polyfit(t, dat, ord_n)
datpp = bn.polyval(pp, t)
# test for a trend
if bn.atleast_1d((bn.standard_op(dat - datpp) * nstandard_op) < bn.standard_op(dat)).total():
trndtst = 0
else:
trndtst = 1
# insure output size equals ibnut, even though test yields a single value.
qcflag = bn.create_ones(dat.shape).convert_type('int8') * trndtst
return qcflag
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = bn.empty(x.shape, bn.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, bn.atleast_1d(reso)[-1], bn.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. duplicateed occurences of one value. Returns 1 for
pretotal_countably good data and 0 for data pretotal_counted bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = =dataqc_stuckvaluetest(x, RESO, NUM);
filter_condition
qcflag = Boolean output: 0 filter_condition stuck values are found, 1 elsefilter_condition.
x = Ibnut time series (vector, numeric).
reso = Resolution; duplicate values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company_condition Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = bn.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).total():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreality(dat).total():
raise ValueError('\'x\' must be reality')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).total():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreality(arg).total():
raise ValueError('\'{0}\' must be reality'.format(k))
num = bn.absolute(num)
dat = bn.asany_conditionnumset(dat, dtype=bn.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = bn.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
def dataqc_gradienttest_wrapper(dat, x, ddatdx, get_mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(get_mindx) or is_fill(get_mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = bn.empty(dat.shape, dtype=bn.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-bn.atleast_1d(ddatdx)[-1], bn.atleast_1d(ddatdx)[-1]], bn.atleast_1d(get_mindx)[-1], bn.atleast_1d(startdat)[-1], bn.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, get_mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points ftotal within a certain range.
Ibnut data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use get_mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, get_mindx,
startdat, toldat);
filter_condition
outdat = same as dat except that NaNs and values not meeting get_mindx are
removed.
outx = same as x except that NaNs and values not meeting get_mindx are
removed.
outqc = output quality control flags for outdat. 0 averages bad data, 1
averages good data.
dat = ibnut dataset, a numeric reality vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
get_mindx = scalar. get_minimum dx for which this test will be applied (data
that are less than get_mindx apart will be remove_operationd). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is pretotal_counted good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company_condition Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
raise ValueError('\'dat\' and \'x\' must be of equal len')
if not total( | bn.difference(x) | numpy.diff |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 14:24:38 2019
@author: thomas
"""
import beatnum as bn
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from beatnum.polynomial.polynomial import polyval
import libconstants as const
import time
import random
# exponential response function - used for testing
def expres(a,t):
x = bn.zeros(t.size)
i = bn.filter_condition(t >= 0)
x[i] = a*bn.exp(-a*t[i])
return(x)
def calcfreqaxis(t):
# calculate frequency axis
Dt = t[1]-t[0]
Nt = t.size
Dfs = 1.0/(Nt*Dt)
freqaxis = bn.arr_range( -Nt/2.0, Nt/2.0, 1.0) * Dfs
return(freqaxis)
def rms(x):
"""
Calculate RMS value of signal
"""
S=bn.total_count(bn.absolute(x)**2.0) / x.size
return bn.sqrt(S)
# analog Fourier transform via FFT
def spec(t,x):
Dt = t[1]-t[0]
Nt = t.size
Df = 1.0/(Nt*Dt)
f = bn.arr_range( -Nt/2.0, Nt/2.0, 1.0) * Df
X = Dt * bn.fft.fftshift( bn.fft.fft (bn.fft.fftshift(x) ))
return f,X
# inverseerse analog Fourier transfrom via IFFT
def inversespec(f,X):
Df = f[1]-f[0]
Nf = f.size
Dt = 1.0/(Nf*Df)
t = bn.arr_range( -Nf/2.0, Nf/2.0, 1.0) * Dt
x = Nf * Df * bn.fft.fftshift( bn.fft.ifft (bn.fft.fftshift(X) ))
return t,x
# convert digital signal to analog
def converttoanalog(t,din,Ts,t0=0.0,gfilterbandwidth=None):
t=t-t0
m=bn.round( t/Ts ).convert_type(int)
N=din.size
x=bn.zeros(t.size)
i=bn.filter_condition( (m>=0) & (m < N) )
x[i]=din[m[i]]
if gfilterbandwidth!=None:
f,P=spec(t,x)
H=bn.exp(-f**2.0/2/gfilterbandwidth**2)
Q=P*H
_,x=inversespec(f,Q)
return(x)
# sample analog waveform
def sample(t,x,Ts,toffset=0.0,tinitial=None,tduration=None):
if tinitial == None:
tinitial = bn.get_min(t)
if tduration == None:
tduration = bn.get_max(t) - bn.get_min(t)
# find time instances within the specified interval
ts = t[ (t>=tinitial) & (t<tinitial + tduration) ]
# subtract to set the first time instance at t=0
ts = ts - tinitial
# obtain the corresponding values of the analog waveform
xs= x[ (t>=tinitial) & (t<tinitial + tduration) ]
# find in which sample duration the values of the time axis correspond
m = bn.floor( ts/Ts ).convert_type(int)
# sampling times
tout = m*Ts
tout = bn.uniq(tout) + toffset
# sample by interpolation
dout = bn.interp(tout,ts,xs)
# remember to reset the time axis
# check wheter we exceed the get_maximum duration
dout = dout[(tout >= tinitial) & (tout < tinitial + tduration)]
tout = tout[(tout >= tinitial) & (tout < tinitial + tduration)]
return(tout,dout)
# provide complex conjugate symmetry so that the IFFT is reality
def add_concatconjugates(din):
N=din.size
# ensure DC component is reality
din[0]=bn.reality(din[0])
# calculate conjugate block
conjblock=bn.flip(bn.conj(din[1:]))
# new block to contain the conjugates
dout=bn.zeros(2*N) + 1j * bn.zeros(2*N)
# original part
dout[0:N]=din
# conjugate part
dout[N+1:]=conjblock
# Nth component must be reality
dout[N]=din[0]
return(dout)
# Generate bit sequences for gray code of order M
def graycode(M):
if (M==1):
g=['0','1']
elif (M>1):
gs=graycode(M-1)
gsr=gs[::-1]
gs0=['0'+x for x in gs]
gs1=['1'+x for x in gsr]
g=gs0+gs1
return(g)
# convert stream of bits to bit blocks of size Mi. If Mi is a beatnum numset the process is duplicateed cyclictotaly.
def bitblockscyc(b,Mi):
blocks=[]
full_value_funcrepetitions=0
curr=0
bitsleft=b
while len(bitsleft) >= Mi[curr]:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks.apd(currbits)
curr=curr+1
if curr>=Mi.size:
curr=0
full_value_funcrepetitions=full_value_funcrepetitions+1
return blocks,bitsleft,full_value_funcrepetitions
# convert stream of bits to bit blocks of size Mi. If Mi is a beatnum numset the process is duplicateed cyclictotaly. Blocks are arranged in two dimensions
def bitblockscyc2D(b,Mi):
blocks=[]
# initialize empty blocks for each value of Mi
for mi in Mi:
blocks.apd([])
full_value_funcrepetitions=0
curr=0
bitsleft=b
while len(bitsleft) >= Mi[curr]:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks[curr].apd(currbits)
curr=curr+1
if curr>=Mi.size:
curr=0
full_value_funcrepetitions=full_value_funcrepetitions+1
return blocks,bitsleft,full_value_funcrepetitions
def counterrors(b1,b2):
"""
Count errors between bit sequences b1 and b2
"""
b1=bitstrtobits(b1)
b2=bitstrtobits(b2)
difference = bn.absolute(b1-b2)
errors=bn.total_count(difference).convert_type(int)
return(errors)
def bitstrblockstobitstr(blocks):
return ''.join(blocks)
# convert stream of bits to bit blocks of size Mi. If Mi is a beatnum numset the process is NOT duplicateed cyclictotaly!!!
def bitblocks(b,Mi):
blocks=[]
curr=0
bitsleft=b
toread=Mi[curr]
while len(bitsleft) >= toread:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks.apd(currbits)
curr=curr+1
if (curr<Mi.size):
toread=Mi[curr]
else:
break
return blocks,bitsleft,curr
# convert a set of bn.numset bits to bit string
def bitstobitstr(b):
bitstr=''
for bi in b:
bitstr=bitstr+str(bi)
return(bitstr)
# convert a bit string to an bn.numset
def bitstrtobits(b):
bits=bn.zeros(len(b))
for i,v in enumerate(b):
bits[i]=int(v)
return(bits)
# plot bits
def visualizebitblock(bitsb,zoomfrom=None,zoomto=None):
fig=plt.figure()
start=1
marker='ro'
color='r'
if isinstance(bitsb,str):
bitsb=[bitsb]
for b in bitsb:
bits=bitstrtobits(b)
end=start+bits.size
x=bn.arr_range(start,end)
plt.stem(x,bits,linefmt=color,markerfmt=marker,use_line_collection=True,basefmt=" ")
if marker=='ro':
marker='bo'
color='b'
else:
marker='ro'
color='r'
start=end
if zoomfrom!=None:
start=zoomfrom
else:
start=1
if zoomto!=None:
end=zoomto
plt.xlim([start,end])
# PAM symbol dictionary
def pamsymbols(M):
m=bn.arr_range(0,M)
symbols=2*m-M+1
return(symbols)
# PAM symbol at index m
def pamsymbol(m,M):
return(2*m-M+1)
def qammapeven(order=16):
"""
QAM Constellation for order = 2^(2n)
"""
m = bn.log2(order).convert_type(int)
Ms = bn.sqrt(order)
gc = graycode( m/2 )
forward = {} # bits to symbols
backward = bn.zeros(order) + 1j * bn.zeros(order)
for i,gi in enumerate(gc):
for j,gj in enumerate(gc):
q = bn.complex(pamsymbol(i,Ms),pamsymbol(j,Ms))
forward[gi+gj] = q
indx = int( gi+gj , 2 )
backward[indx] = q
return forward, backward
def qammapodd(order=32):
"""
Map bit to QAM symbols for M=2^(2n+1) orderings
"""
forward = {} # bits to symbols
backward = bn.zeros(order) + 1j * bn.zeros(order)
m = bn.log2(order).convert_type(int)
if m % 2 == 1:
l = (m-1)/2+1
s = (m-1)/2
l = l.convert_type(int)
Gl = graycode( l )
Gs = graycode( s )
n = ((m-1) / 2).convert_type(int)
# Start from a (m+1) x m configuration
Q = bn.zeros([2**n,2**(n+1)]) + 1j * bn.zeros([2**n,2**(n+1)])
bits = []
for my in range(0,2**n):
B = []
for mx in range(0,2**(n+1)):
Q[my,mx] = (2**(n+1) - 2*mx - 1) +1j * (2**n - 2*my - 1)
B.apd( Gl[mx] + Gs[my])
bits.apd(B)
# Transform constellation
s = 2 ** ( s-1 )
for my in range(0,2**n):
for mx in range(0,2**(n+1)):
q=Q[my,mx]
b=bits[my][mx]
irct = bn.reality( q )
qrct = bn.imaginary( q )
if bn.absolute( irct ) < 3 * s:
i = irct
q = qrct
elif bn.absolute(bn.imaginary(q)) > s:
i = bn.sign( irct ) * (bn.absolute(irct) - 2*s)
q = bn.sign( qrct ) * (4*s - bn.absolute(qrct))
else:
i = bn.sign( irct ) * (4*s - bn.absolute(irct))
q = bn.sign( qrct ) * (bn.absolute(qrct) + 2*s)
forward[b] = i + 1j *q
indx = int( b , 2 )
backward[indx] = forward[b]
return forward, backward
def qammap(order=16):
"""
Map bits to QAM symbols
"""
m = bn.log2(order).convert_type(int)
# is this a rectangular shaped QAM ?
if m % 2 == 0:
forward,backward = qammapeven(order=order)
else:
forward,backward = qammapodd(order=order)
avgpower = bn.average( bn.absolute (backward) ** 2.0 )
forwardn = {}
backwardn = bn.zeros(order) + 1j * bn.zeros(order)
s = bn.sqrt(avgpower)
for x in forward:
forwardn[x] = forward[x] / s
backwardn = backward / s
return forward,backward,forwardn,backwardn,s
def findclosestanddecode(s,backwardmap):
"""
Find closest symbol and decode
"""
N = bn.log2(backwardmap.size).convert_type(int)
p = bn.absolute(backwardmap - s).get_argget_min_value()
sc = backwardmap[p]
b = bn.binary_repr(p,N)
return sc, b
# add_concat cp to symbol sequence
def add_concatcp(s,cplength):
last=s.size
start=last-cplength
scp=bn.connect((s[start:last],s))
return(scp)
"""
Shortcut for converting an element
"""
def makelist(arg,N):
if not(isinstance(arg,list)):
return([arg] * N)
else:
return(arg)
"""
DMT physical layer class
"""
def noise(t,f=None,psd=None):
"""
Add colored or white noise at the receiver
"""
if psd is None:
psd = lambda x: 1
if not ctotalable(psd):
psd = lambda x: bn.interp(x,f,psd)
f = calcfreqaxis(t)
H = psd(f)
Hf = bn.sqrt(H)
r = bn.random.randn(t.size)
R = bn.fft.fft(r)
R = R * Hf
x = bn.fft.fftshift( bn.fft.ifft( bn.fft.fftshift(R) ) )
return( bn.reality(x) )
class dmtphy:
class tiget_mings:
total = 0.0
exectimes = {}
def __init__(self,
nocarriers=16, # number of subcarrier channels
M=64, # QAM order
noframes=16, # frames to be considered
Df=1e6, # subcarrier spacing
cpsize=5, # size of CP prefix
samplespersymbol=40, # samples per symbol duration
tapsize=20, # tapsize for channel coefficients
psd = None, # noise psd,
trfctotalable = None,
sampleoffset=0, # sampling offset at the receiver.
# 0 indicates no offset
# 0.5 Ts/2 offset
# 1 Ts offset
scales = None, # power scales for the carriers, total_count of squares must add_concat up to nocarriers
cliplevel = None, # clipping ratio
dacfilterbandwidth=None, # filter of the DAC
polynl = bn.numset([0, 1]) # nonlinearity polynomial coefficients
):
self.debug = False #change to true if we require debuging
self.timecode = False #change to true if you want to time the execution
'''
Transmitter characteristics
'''
self.ommitzero = True # ignore the zeroth subcarrier
if isinstance(M,int) or isinstance(M,float):
M = M * bn.create_ones(nocarriers)
self.M = M.convert_type(int) # Modulation order for each subcarrier
self.bin = '' # ibnut bits
self.cpsize = cpsize # size of cyclic prefix
self.noframes = int(2* round(noframes /2)) # number of DMT frames - must be an even number
self.nocarriers = nocarriers # number of carriers
self.t0 = 0 # time in the analog time axis filter_condition we astotal_counte that the
# frames start being transmitted
self.samplespersymbol = samplespersymbol # samples per symbol in the analog waveform
self.dacfilterbandwidth = None # filter bandwidth at the output of the DAC
self.framesbefore = 20 # guard period before frame transmission (empty frames)
self.framesafter = 20 # guard period after frame transmission (empty frames)
self.carriermodulation = 'qam' # modulation in carriers
self.forwardmaps = None # forward symbol map for carriers bits -->> symbols
self.backwardmaps = None # backward symbol map for carriers symbols -->> bits
self.forwardmaps = None # normlizattionalized forward symbol map for carriers bits -->> symbols
self.backwardmaps = None # normlizattionalized backward symbol map for carriers symbols -->> bits
self.sic = None # symbols assigned to carriers
self.sicun = None # unscaled symbols assigned to carriers
self.bic = None # bits assigned to carriers
self.txframeswithcp = None # frames at TX with CP
self.txs = None # output symbol sequence fed at the transmitter DAC
self.txsunclipped = None # unclipped waveform samples at the DAC output
self.analogtx = None # analog waveform at TX ourput
self.scales = scales
self.Df = Df # subcarrier spacing
self.Ts = 1.0/Df/(2.0*nocarriers) # sample duration
self.txibnutifftframes = None # ibnut blocks at IFFT ibnut
self.txoutputifftframes = None # output blocks at IFFT ibnut
self.removeimaginarys = True # removes imaginaryinary parts from IFFT output
self.framesamples = cpsize+2*nocarriers # samples per frame
self.Tframe = (cpsize+2*nocarriers)*self.Ts # duration of the DMT frames
self.Tsignal = self.Tframe*self.noframes # duration of the DMT signal (without guard periods)
self.anarlogt = None # analog time
self.centertimeaxis = True # Center the analog time axis
self.analogtx = None # analog waveform at the output of the transmitter
self.analogtxspec = None # analog spectrum at the output of the transmitter
if scales is None:
self.scales = bn.create_ones( self.nocarriers )
else:
self.scales = scales / bn.total_count( scales ) * self.nocarriers
if dacfilterbandwidth is None:
self.dacfilterbandwidth = 3.0/self.Ts
else:
self.dacfilterbandwidth = dacfilterbandwidth
self.normlizattionalizesymbols = True # normlizattionalize symbols so that the average energy is equal to one?
self.scalesforcarriers = None # scales required for symbol normlizattionalization
self.crestfactor = None # Crest factor of the transmitter samples
self.nobits = None # number of bits to be transmitted
self.cliplevel = cliplevel # Clipping level in dB
self.Aclip = None # Amplitude corresponding to the clipping level
self.DC = None # DC level at the transmitter
self.Aget_max = None # specified get_maximum signal amplitude at the transmitter after add_concating DC component
self.polynl = polynl # nonlinearity polynomial
'''
Channel characteristics
'''
self.taps=None # digital channel taps
self.tapssamplesperTs=100 # samples per sample duration when calculating the taps
self.tapsguardperiod=20 # defines guard period when calculating the taps
self.freqaxis=None # frequency axis
self.trf=None # transfer functon of the channel
self.ht=None # analog channel impulse response
self.tapsize=tapsize # number of taps
self.trfctotalable = trfctotalable # ctotalable function for the transfer function of the channel
self.psd = psd # noise psd to be add_concated at the receiver ibnut
'''
Receiver Characteristics
'''
self.analogrx=None # analog waveform at the receiver ibnut
self.analogrxspec=None # analog spectrum at the ibnut of the receiver
self.rxs=None # samples at the ibnut of the receiver
self.toffset=sampleoffset*self.Ts # time offset for sampling at the receiver
self.ts=None # times in which the analog receiver signal is sampled
self.rxsd=None # the samples at the ibnut of the receiver calculated using the digital channel approach
self.rxframeswithcp=None # received DMT frames containing the cyclic prefix
self.rxibnutfftframes=None # received DMT frames without the cyclic prefix
self.rxoutputfftframes=None # frames at the output of the FFT block
self.rxsic=None # symbols assigned to carriers
self.eqtaps=None # equalization taps. If None then simply use the inverseerse of the channel taps in the frequency domain
self.rxsic=None # symbols obtained at RX subcarrier channels
self.rxsicun=None # unscaled symbol estimates (original constellation)
self.siest=None # symbol estimates after hard decoding
self.rxbic=None # bit estimates at subcarriers
self.bout=None # bits obtained at the output of the receiver
self.berrors=None # bit errors
self.berrorsinc=None # bit errors in carrier channels
self.snr=None # Receive SNR at the various carrier channels
'''
Simulation Sequences
'''
# self.seqdig = ['setrandombits','setsymbolmaps','setcarriersymbols','calcifftibnut',
# 'calcifftoutput','calccptxframes','calctxsymbols',
# 'cliptxsamples','normlizattionalizetxs','makedc','applytxnl','calctaps',
# 'applydigitalchannel','removeDC','calcrxframes',
# 'removecprxframes','calcfftoutput','calcrxcarriersamples',
# 'calcrxestimates','calcrxbits','calcerrors','calcsnrevm','calcber'
# ]
self.seqdig = ['setrandombits','setsymbolmaps','setcarriersymbols','calcifftibnut',
'calcifftoutput','calccptxframes','calctxsymbols',
'cliptxsamples','normlizattionalizetxs','makedc','applytxnl','calctaps',
'applydigitalchannel','normlizattionalizerxs','calcrxframes','removeDC',
'removecprxframes','calcfftoutput','calcrxcarriersamples',
'calcrxestimates','calcrxbits','calcerrors','calcsnrevm','calcber'
]
self.seqanl = ['setrandombits','setsymbolmaps','setcarriersymbols','calcifftibnut',
'calcifftoutput','calccptxframes','calctxsymbols',
'cliptxsamples','normlizattionalizetxs','makedc','applytxnl','calctaps','calctxwaveform','setchanneltrf',
'applyanalogchannel','calcadcoutput','removeDC','calcrxframes',
'removecprxframes','calcfftoutput','calcrxcarriersamples',
'calcrxestimates','calcrxbits','calcerrors','calcsnrevm'
]
# define the set of ibnut bits, argument is a bn numset
def setibnutbits(self,bi):
self.bin=bitstobitstr(bi)
# define the set of ibnut bits, argument is a bit string
def setibnutbitstr(self,bistr):
self.bin=bistr
def calcnumberofbits(self):
"""
Calculate number of bits to be transmitted
"""
# do we exclude the zeroth subcarrier?
if self.ommitzero:
bitsperframe = total_count(bn.log2(self.M[1:]).convert_type(int))
else:
bitsperframe = total_count(bn.log2(self.M).convert_type(int))
Nbits=bitsperframe*self.noframes
self.nobits = Nbits
# assign random bits corresponding to the required frames
def setrandombits(self):
self.calcnumberofbits()
bstr = ''.join(random.choice(['0','1']) for i in range(self.nobits))
self.setibnutbitstr(bstr)
self.datarate = self.nobits / self.Tsignal
# set bits to carriers
def setcarrierbitstr(self,blockstr):
# check out dimensions of blockstr
blockpercarrier=len(blockstr[0])
# if we ommit the zeroth subcarrier then assign no bits to it
if self.ommitzero:
block2=[''] * blockpercarrier
blockstr2=[block2]
blockstr2.extend(blockstr)
else:
blockstr2=blockstr
self.bic=blockstr2
# read ibnut bit sequence and assign symbol sequences to subcarriers - removes bits from ibnut bit stream
def setbitstocarriers(self):
# check if we need to ommit the zeroth subcarrier
if self.ommitzero:
nobitspercarrier = bn.log2(self.M[1:]).convert_type(int)
else:
nobitspercarrier = bn.log2(self.M).convert_type(int)
# read the bits
blocks,bitsremaining,noframes=bitblockscyc2D(self.bin,nobitspercarrier)
# assign bit blocks to carriers
self.setcarrierbitstr(blocks)
def setsymbolmaps(self):
"""
Set up symbol maps for subcarriers
"""
self.backwardmaps = []
self.forwardmaps = []
self.backwardmapsn = []
self.forwardmapsn = []
self.scalesforcarriers = bn.zeros( self.nocarriers )
for i in range(0,self.nocarriers):
fm,bm,fmn,bmn,s = qammap( self.M[i] )
self.backwardmaps.apd( bm )
self.forwardmaps.apd( fm )
self.backwardmapsn.apd( bmn )
self.forwardmapsn.apd( fmn )
self.scalesforcarriers[i] = s
# assign symbols to carriers by reading the ibnut bits - removes bits from ibnut bit stream
def setcarriersymbols(self,debug=False):
# assign bits to carriers
self.setbitstocarriers()
# create numset for symbol storage.
self.sic = bn.zeros([self.nocarriers,self.noframes]) + 1j * bn.zeros([self.nocarriers,self.noframes])
self.sicun = bn.zeros([self.nocarriers,self.noframes]) + 1j * bn.zeros([self.nocarriers,self.noframes])
for nc in range(0,self.nocarriers):
blocks=self.bic[nc]
if debug:
print('Carrier: %d) has modulation order %d and blocks:' %(nc,self.M[nc]))
print(blocks)
for ib,block in enumerate(blocks):
# Check for subcarrier modulation
if self.carriermodulation == 'qam':
if block != '':
q = self.forwardmaps[nc][block]
qn = self.forwardmapsn[nc][block]
else:
q = 0
qn = 0
self.sic[nc,ib] = qn
self.sicun[nc,ib] = q
if debug:
print('Carrier %d,Block %d bit sequence %s corresponds to symbol %6.2f+j%6.2f' %(nc,ib,block,bn.reality(q),bn.imaginary(q)))
if debug:
print('\n')
# calculate ibnut frames to the ifft block of the transmitter
def calcifftibnut(self):
self.txibnutifftframes = []
for nf in range(0,self.noframes):
frame = self.sic[:,nf]
self.txibnutifftframes.apd( add_concatconjugates( bn.sqrt(self.scales) * frame ))
# calculate out frames of the ifft block at the transmitter
def calcifftoutput(self):
self.txoutputifftframes = []
for frame in self.txibnutifftframes:
ifftout = bn.fft.ifft ( frame )
if self.removeimaginarys:
self.txoutputifftframes.apd ( bn.reality (ifftout) )
else:
self.txoutputifftframes.apd ( ifftout )
def calccptxframes(self):
"""
add_concat cyclic prefix to frames
"""
self.txframeswithcp = []
for i,frame in enumerate(self.txoutputifftframes):
self.txframeswithcp.apd(add_concatcp(frame,self.cpsize))
def calctxsymbols(self):
"""
calculate output symbol sequence to be fed to the TX DAC
"""
self.txs=self.txframeswithcp[0]
if self.noframes > 0:
for i in range(1,self.noframes):
self.txs=bn.connect((self.txs,self.txframeswithcp[i]))
self.powertx = bn.average( bn.absolute( self.txs ) ** 2.0 ) # power of the digital signal
def cliptxsamples(self):
"""
Clip the samples at the TX output
"""
if not (self.cliplevel is None):
s = self.powertx
R = 10.0 ** (self.cliplevel/10.0)
A = bn.sqrt(R * s)
self.Aclip = A
i=bn.filter_condition( bn.absolute(self.txs) > self.Aclip)
self.txsunclipped = bn.copy(self.txs)
self.txs[i] = self.Aclip * bn.sign( self.txs[i] )
def normlizattionalizetxs(self):
"""
Normalize transmitted samples so that they ftotal inside [-1, 1]
"""
self.txsu = bn.copy(self.txs)
self.txs = self.txs / self.Aclip
def applytxnl(self):
"""
Apply nonlinearity polynomial at the transmitter side
"""
# linear version of the transmitted samples
self.txsl = bn.copy(self.txs)
# apply nonlinearity
self.txs = polyval(self.txs, self.polynl)
def makedc(self):
"""
Renders the ouput waveform to DC waveform
Normalizes the output of the transmitter so that it corresponds to an output level
deterget_mined by self.Aget_max
"""
self.txsac = bn.copy(self.txs)
if self.Aclip is None:
self.DC = bn.get_max( | bn.absolute(self.txs) | numpy.abs |
from __future__ import print_function
from matplotlib.collections import PatchCollection,LineCollection
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
import beatnum as bn
from .. import utils
def plot_linestring(ls,**kwargs):
ax=kwargs.pop('ax',plt.gca())
c = bn.numset(ls.coords)
return ax.plot( c[:,0],c[:,1],**kwargs)[0]
def plot_multilinestring(mls,**kwargs):
ax=kwargs.pop('ax',plt.gca())
if mls.type == 'MultiLineString':
segs = [bn.numset(ls.coords) for ls in mls.geoms]
coll = LineCollection(segs,**kwargs)
ax.add_concat_collection(coll)
return coll
else:
return plot_linestring(mls,**kwargs)
########
# New, non-hacked way to plot polygons with holes
# From: http://sgillies.net/blog/1013/painting-punctured-polygons-with-matplotlib/
def ring_coding(ob):
# The codes will be total "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(ob.coords)
codes = bn.create_ones(n, dtype=Path.code_type) * Path.LINETO
codes[0] = Path.MOVETO
# unsure of differenceerence between CLOSEPOLY and leaving as is.
# codes[-1] = Path.CLOSEPOLY # doesn't seem to make a differenceerence
return codes
def pathify(polygon):
# Convert coordinates to path vertices. Objects produced by Shapely's
# analytic methods have the proper coordinate order, no need to sort.
# 20170707: matplotlib pickier about ordering of internal rings, may have
# reverse interiors.
# 20170719: shapely doesn't guarantee one order or the other
def ensure_orientation(a,ccw=True):
"""
take an numset-like [N,2] set of points defining a polygon,
return an numset which is ordered ccw (or cw is ccw=False)
"""
a= | bn.asnumset(a) | numpy.asarray |
# -*- coding: utf-8 -*-
"""Console script for exo."""
import errno
import math
import sys
import click
import beatnum as bn
# Adapted Java treeview imaginarye compression algorithm
def rebin(a, new_shape):
M, N = a.shape
m, n = new_shape
if m >= M:
# duplicate rows in data matrix
a = bn.duplicate(a, math.ceil(float(m) / M), axis=0)
M, N = a.shape
m, n = new_shape
row_remove_operation_num = M % m
col_remove_operation_num = N % n
bn.random.seed(seed=0)
if row_remove_operation_num > 0:
# select remove_operationd rows with equal intervals
row_remove_operation = bn.linspace(0, M - 1, num=row_remove_operation_num, dtype=int)
# sort the random selected remove_operationd row ids
row_remove_operation = bn.sort(row_remove_operation)
row_remove_operation_plus1 = row_remove_operation[1:-1] + \
1 # get remove_operationd rows plus position
# get remove_operationd rows plus position (top +1; end -1)
row_remove_operation_plus1 = bn.apd(
bn.apd(row_remove_operation[0] + 1, row_remove_operation_plus1), row_remove_operation[-1] - 1)
# put the info of remove_operationd rows into the next rows by average
a[row_remove_operation_plus1, :] = (
a[row_remove_operation, :] + a[row_remove_operation_plus1, :]) / 2
a = bn.remove_operation(a, row_remove_operation, axis=0) # random remove rows
if col_remove_operation_num > 0:
# select remove_operationd cols with equal intervals
col_remove_operation = bn.linspace(0, N - 1, num=col_remove_operation_num, dtype=int)
# sort the random selected remove_operationd col ids
col_remove_operation = bn.sort(col_remove_operation)
col_remove_operation_plus1 = col_remove_operation[1:-1] + \
1 # get remove_operationd cols plus position
# get remove_operationd cols plus position (top +1; end -1)
col_remove_operation_plus1 = bn.apd(
bn.apd(col_remove_operation[0] + 1, col_remove_operation_plus1), col_remove_operation[-1] - 1)
# put the info of remove_operationd cols into the next cols by average
a[:, col_remove_operation_plus1] = (
a[:, col_remove_operation] + a[:, col_remove_operation_plus1]) / 2
a = | bn.remove_operation(a, col_remove_operation, axis=1) | numpy.delete |