| prompt (string, 135–513k chars) | completion (string, 9–138 chars) | api (string, 9–42 chars) |
|---|---|---|
import os.path
import random

import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset

from utils.dataset_utils import letterbox_image


# Random number generator used for random data augmentation
def rand(a=0, b=1):
    return np.random.rand() * (b - a) + a


# collate_fn for the DataLoader: concatenates the numpy images and labels of a batch
# of triplets, e.g. with batch_size=64 the resulting images have shape (192, 3, 224, 224)
def dataset_collate(batch):
    images = []
    labels = []
    for img, label in batch:
        images.append(img)
        labels.append(label)
    images1 = np.array(images)[:, 0, :, :, :]
    images2 = np.array(images)[:, 1, :, :, :]
    images3 = np.array(images)[:, 2, :, :, :]
    images = np.concatenate([images1, images2, images3], 0)
    labels1 = np.array(labels)[:, 0]
    labels2 = np.array(labels)[:, 1]
    labels3 = np.array(labels)[:, 2]
    labels = np.concatenate([labels1, labels2, labels3], 0)
    return images, labels
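
# Usage sketch (not from the original repository; the dataset arguments below are
# illustrative placeholders): dataset_collate is intended to be passed to a torch
# DataLoader so each batch of triplets is flattened from (B, 3, C, H, W) into
# (3*B, C, H, W) before the forward pass.
#
#   from torch.utils.data import DataLoader
#   train_dataset = DogFaceDataset((224, 224, 3), lines, num_train, num_classes)
#   train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True,
#                             collate_fn=dataset_collate)
#   for images, labels in train_loader:
#       ...  # images: (192, 3, 224, 224), labels: (192,) when batch_size=64
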
class DogFaceDataset(Dataset):
    # input_shape is (H, W, C), e.g. (224, 224, 3)
    def __init__(self, input_shape, dataset_path, num_train, num_classes):
        super(DogFaceDataset, self).__init__()

        self.dataset_path = dataset_path
        self.image_height = input_shape[0]
        self.image_width = input_shape[1]
        self.channel = input_shape[2]

        self.paths = []
        self.labels = []

        self.num_train = num_train
        self.num_classes = num_classes
        self.load_dataset()

    def __len__(self):
        return self.num_train

    # Read cls_train.txt to obtain image paths and labels
    def load_dataset(self):
        for path in self.dataset_path:
            # In cls_train.txt the class comes before the ";", the path after it
            path_split = path.split(";")
            self.paths.append(path_split[1].split()[0])
            self.labels.append(int(path_split[0]))
        self.paths = np.array(self.paths, dtype=object)
        self.labels = np.array(self.labels)

    # Given an image, apply preprocessing and random augmentation:
    # scaling, flipping, rotation and colour adjustment
    def get_random_data(self, image, input_shape, jitter=0.1, hue=.05, sat=1.3, val=1.3, flip_signal=True):
        image = image.convert("RGB")
        h, w = input_shape
        rand_jit1 = rand(1 - jitter, 1 + jitter)
        rand_jit2 = rand(1 - jitter, 1 + jitter)
        new_ar = w / h * rand_jit1 / rand_jit2

        # random scaling
        scale = rand(0.9, 1.1)
        if new_ar < 1:
            nh = int(scale * h)
            nw = int(nh * new_ar)
        else:
            nw = int(scale * w)
            nh = int(nw / new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)

        # random horizontal flip
        flip = rand() < .5
        if flip and flip_signal:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)

        dx = int(rand(0, w - nw))
        dy = int(rand(0, h - nh))
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image = new_image

        # random rotation
        rotate = rand() < .5
        if rotate:
            angle = np.random.randint(-5, 5)
            a, b = w / 2, h / 2
            M = cv2.getRotationMatrix2D((a, b), angle, 1)
            image = cv2.warpAffine(np.array(image), M, (w, h), borderValue=[128, 128, 128])

        # random hue, saturation and value adjustment
        hue = rand(-hue, hue)
        sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
        val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
        x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
        x[..., 0] += hue * 360
        x[..., 0][x[..., 0] > 1] -= 1
        x[..., 0][x[..., 0] < 0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x[:, :, 0] > 360, 0] = 360
        x[:, :, 1:][x[:, :, 1:] > 1] = 1
        x[x < 0] = 0
        image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255

        if self.channel == 1:
            image_data = Image.fromarray(np.uint8(image_data)).convert("L")  # convert from array back to a PIL image
        return image_data

    def __getitem__(self, index):
        # images holds anchor, positive and negative samples: (N=3, C, H, W)
        images = np.zeros((3, self.channel, self.image_height, self.image_width))
        labels = np.zeros(3)

        # ------------------------------#
        #   First get two face images of the same dog as anchor and positive:
        #   randomly pick a dog and collect the paths of all of its photos
        # ------------------------------#
        c = random.randint(0, self.num_classes - 1)
        selected_path = self.paths[self.labels[:] == c]
        while len(selected_path) < 2:
            c = random.randint(0, self.num_classes - 1)
            selected_path = self.paths[self.labels[:] == c]

        # ------------------------------#
        #   Randomly choose two of them
        # ------------------------------#
        image_indexes = np.random.choice(range(0, len(selected_path)), 2)
        # 1st image
        image = Image.open(selected_path[image_indexes[0]])
        image = self.get_random_data(image, [self.image_height, self.image_width])
        image = np.transpose(np.asarray(image).astype(np.float64), [2, 0, 1]) / 255
        if self.channel == 1:
            images[0, 0, :, :] = image
        else:
            images[0, :, :, :] = image
        labels[0] = c
        # 2nd image
        image = Image.open(selected_path[image_indexes[1]])
        image = self.get_random_data(image, [self.image_height, self.image_width])
        image = np.transpose(np.asarray(image).astype(np.float64), [2, 0, 1]) / 255
        if self.channel == 1:
            images[1, 0, :, :] = image
        else:
            images[1, :, :, :] = image
        labels[1] = c

        # ------------------------------#
        #   Take one image of a different dog as the negative
        # ------------------------------#
        different_c = list(range(self.num_classes))
        different_c.pop(c)  # drop the dog that was already chosen
        different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
        current_c = different_c[different_c_index[0]]
        selected_path = self.paths[self.labels == current_c]
        while len(selected_path) < 1:
            different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
            current_c = different_c[different_c_index[0]]
            selected_path = self.paths[self.labels == current_c]

        # ------------------------------#
        #   Randomly choose one image
        # ------------------------------#
        image_indexes = np.random.choice(range(0, len(selected_path)), 1)
        image = Image.open(selected_path[image_indexes[0]])
        image = self.get_random_data(image, [self.image_height, self.image_width])
        image = np.transpose(np.asarray(image).astype(np.float64), [2, 0, 1]) / 255
        if self.channel == 1:
            images[2, 0, :, :] = image
        else:
            images[2, :, :, :] = image
        labels[2] = current_c
        return images, labels

    # --------------
    #   For visualisation: returns three PIL Image objects
    # --------------
    def get_one_triplet(self):
        c = random.randint(0, self.num_classes - 1)
        selected_path = self.paths[self.labels[:] == c]
        while len(selected_path) < 2:
            c = random.randint(0, self.num_classes - 1)
            selected_path = self.paths[self.labels[:] == c]
        image_indexes = np.random.choice(range(0, len(selected_path)), 2)
        anchor = Image.open(selected_path[image_indexes[0]])
        positive = Image.open(selected_path[image_indexes[1]])

        different_c = list(range(self.num_classes))
        different_c.pop(c)  # drop the dog that was already chosen
        different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
        current_c = different_c[different_c_index[0]]
        selected_path = self.paths[self.labels == current_c]
        while len(selected_path) < 1:
            different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
            current_c = different_c[different_c_index[0]]
            selected_path = self.paths[self.labels == current_c]
        image_indexes = np.random.choice(range(0, len(selected_path)), 1)
        negative = Image.open(selected_path[image_indexes[0]])
        return anchor, positive, negative


# ------------------------------------------
#   Each sample consists of two images and is either positive or negative:
#   a positive sample uses two photos of the same dog, a negative sample uses
#   photos of two different dogs. An is_same flag is returned to tell them apart.
# ------------------------------------------
class EvalDataset(Dataset):
    def __init__(self, eval_set_path, pairs_path, image_size):
        '''
        :param eval_set_path: path to the evaluation dataset
        :param pairs_path: path to the txt file listing the evaluation pairs
        :param image_size: image size
        '''
        super(EvalDataset, self).__init__()
        self.image_shape = image_size
        self.pairs_path = pairs_path
        self.samples_list = self.get_samples(eval_set_path)

    def get_random_pair(self):
        index = random.randint(0, len(self.samples_list) - 1)
        return self.samples_list[index]

    def get_samples(self, eval_set_path, file_ext='jpg'):
        # positive sample: pairs_list[i] = ['Name', '1', '4'] -> the dog's first and fourth image
        # negative sample: pairs_list[j] = ['Name_1', '1', 'Name_2', '2']
        pairs_list = []
        with open(self.pairs_path, 'r') as f:
            for line in f.readlines()[1:]:  # skip the first line, which records the fold count and samples per fold
                pair = line.strip().split()
                pairs_list.append(pair)
        samples_list = []  # each element is a tuple with the two image paths and the is_same flag
        for i in range(len(pairs_list)):
            pair = pairs_list[i]
            if len(pair) == 3:  # positive sample
                path_1st_dog = os.path.join(eval_set_path, pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.' + file_ext)
                path_2nd_dog = os.path.join(eval_set_path, pair[0], pair[0] + '_' + '%04d' % int(pair[2]) + '.' + file_ext)
                is_same_dog = True
            elif len(pair) == 4:  # negative sample
                path_1st_dog = os.path.join(eval_set_path, pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.' + file_ext)
                path_2nd_dog = os.path.join(eval_set_path, pair[2], pair[2] + '_' + '%04d' % int(pair[3]) + '.' + file_ext)
                is_same_dog = False
            if os.path.exists(path_1st_dog) and os.path.exists(path_2nd_dog):  # only add the pair if both paths exist
                samples_list.append((path_1st_dog, path_2nd_dog, is_same_dog))
        return samples_list

    def __len__(self):
        return len(self.samples_list)

    def __getitem__(self, index):
        (path_1st_dog, path_2nd_dog, is_same_dog) = self.samples_list[index]
        # letterbox padding
        img_1st_dog, img_2nd_dog = Image.open(path_1st_dog), Image.open(path_2nd_dog)
        img_1st_dog = letterbox_image(img_1st_dog, [self.image_shape[1], self.image_shape[0]])
        img_2nd_dog = letterbox_image(img_2nd_dog, [self.image_shape[1], self.image_shape[0]])
        # normalisation
        img_1st_dog, img_2nd_dog = np.array(img_1st_dog) / 255, np.array(img_2nd_dog) / 255
        img_1st_dog = np.transpose(img_1st_dog, [2, 0, 1])
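        # The source record is truncated at this point. The remaining lines presumably
        # mirror the handling of the first image and return the pair, e.g.
        # (assumption, not from the original file):
        #   img_2nd_dog = np.transpose(img_2nd_dog, [2, 0, 1])
        #   return img_1st_dog, img_2nd_dog, is_same_dog
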
#!/usr/bin/env python
'''
Uses VTK python to allow for editing point clouds associated with the contour
method. Full interaction requires a 3-button mouse and keyboard.
-------------------------------------------------------------------------------
Current mapping is as follows:
LMB - rotate about point cloud centroid.
MMB - pan
RMB - zoom/refresh window extents
1 - view 1, default, looks down z axis onto xy plane
2 - view 2, looks down x axis onto zy plane
3 - view 3, looks down y axis onto zx plane
r - enter/exit picking mode; LMB is used to generate a selection window. Exiting
picking mode will highlight selected points.
z - increase aspect ratio
x - decrease aspect ratio
c - return to default aspect
f - flip colors from white on dark to dark on white
i - save output to .png in current working directory
a - toggles axes
o - toggles outline (if present)
-------------------------------------------------------------------------------
1.1 - Fixed array orientation, clipping issue, compass scaling and sped up writing output
      Added ReadMask
1.2 - Fixed window handling, now exits cleanly
1.3 - Modified to run in Python 3.x, uses VTK keyboard interrupts to start picking, Qt button for this function has been commented out.
1.4 - Added the ability to 'level' incoming data based on AFRC input
1.5 - Added SVD analysis/transformations
1.6 - Added ability to read PC-DMIS csv files
1.7 - Added outline generation for unregistered point clouds & rotation of reference data
'''
__author__ = "<NAME>"
__version__ = "1.7"
__email__ = "<EMAIL>"
__status__ = "Experimental"
__copyright__ = "(c) <NAME>, 2014-2019"
import sys
import os.path
from pkg_resources import Requirement, resource_filename
import beatnum as bn
import scipy.io as sio
from scipy.spatial import Delaunay
import vtk
import vtk.util.beatnum_support as vtk_to_beatnum
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from PyQt5 import QtCore, QtGui, QtWidgets
from pyCM.pyCMcommon import *
try:
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
except:
print('Package missing for outline processing.')
nosio=False
def mask_def(*args,**kwargs):
"""
Main function, builds qt interaction
"""
app = QtWidgets.QApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
spl_fname=resource_filename("pyCM","meta/pyCM_logo.png")
splash_pix = QtGui.QPixmap(spl_fname,'PNG')
splash = QtWidgets.QSplashScreen(splash_pix)
splash.setMask(splash_pix.mask())
splash.show()
app.processEvents()
window = pnt_interactor(None)
if len(args)==2:
pnt_interactor.get_ibnut_data(window,args[0],args[1])
elif len(args)==1:
pnt_interactor.get_ibnut_data(window,args[0],None)
else:
pnt_interactor.get_ibnut_data(window,None,None)
window.show()
splash.finish(window)
window.iren.Initialize() # Need this line to actually show the render inside Qt
ret = app.exec_()
if sys.standard_opin.isatty() and not hasattr(sys,'ps1'):
sys.exit(ret)
else:
return window
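# Usage sketch (an assumption based on the argument handling above, not from the
# original documentation): the editor can be launched standalone with a perimeter
# file and a point-cloud file, or with no arguments to pick files interactively.
# The module path below is hypothetical.
#
#   from pyCM.point_cloud import mask_def
#   mask_def('outline.txt', 'point_cloud.txt')   # preselected files
#   mask_def()                                   # prompts for files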
class pt_main_window(object):
"""
Class to build qt interaction, including VTK widget
setupUi builds, initialize starts VTK widget
"""
def setupUi(self, MainWindow):
MainWindow.setWindowTitle(("pyCM - Point editor v%s" %__version__))
MainWindow.setWindowIcon(QtGui.QIcon(resource_filename("pyCM","meta/pyCM_icon.png")))
self.centralWidget = QtWidgets.QWidget(MainWindow)
if hasattr(MainWindow,'setCentralWidget'):
MainWindow.setCentralWidget(self.centralWidget)
else:
self.centralWidget=MainWindow
self.mainlayout=QtWidgets.QGridLayout(self.centralWidget)
self.vtkWidget = QVTKRenderWindowInteractor(self.centralWidget)
mainUiBox = QtWidgets.QGridLayout()
self.vtkWidget.setMinimumSize(QtCore.QSize(1050, 600))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.vtkWidget.sizePolicy().hasHeightForWidth())
self.vtkWidget.setSizePolicy(sizePolicy)
self.statLabel=QtWidgets.QLabel("Idle")
self.statLabel.setWordWrap(True)
self.statLabel.setFont(QtGui.QFont("Helvetica",italic=True))
self.statLabel.setMinimumWidth(100)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.statLabel.sizePolicy().hasHeightForWidth())
self.statLabel.setSizePolicy(sizePolicy)
headFont=QtGui.QFont("Helvetica [Cronyx]",weight=QtGui.QFont.Bold)
#define buttons/widgets
self.reloadButton = QtWidgets.QPushButton('New profile')
scalingLabel=QtWidgets.QLabel("Active axis for scaling")
scalingLabel.setFont(headFont)
self.xsButton=QtWidgets.QRadioButton("x")
self.ysButton=QtWidgets.QRadioButton("y")
self.zsButton=QtWidgets.QRadioButton("z")
self.zsButton.setChecked(True)
self.scalingButtonGroup = QtWidgets.QButtonGroup()
self.scalingButtonGroup.add_concatButton(self.xsButton)
self.scalingButtonGroup.add_concatButton(self.ysButton)
self.scalingButtonGroup.add_concatButton(self.zsButton)
self.scalingButtonGroup.setExclusive(True)
scaleBoxlayout = QtWidgets.QGridLayout()
scaleBoxlayout.add_concatWidget(self.xsButton,1,1)
scaleBoxlayout.add_concatWidget(self.ysButton,1,2)
scaleBoxlayout.add_concatWidget(self.zsButton,1,3)
self.levelButton=QtWidgets.QRadioButton("Translate to average z value")
rotateZlabel=QtWidgets.QLabel("Rotate")
self.rotateZ= QtWidgets.QDoubleSpinBox()
self.rotateZ.setToolTip('Degrees, positive is clockwise')
self.rotateZ.setValue(0)
self.rotateZ.setMaximum(180)
self.rotateZ.setMinimum(-180)
self.impose_rotation = QtWidgets.QPushButton('Apply')
self.impose_rotation.setToolTip('Manutotaly impose rotation about z axis')
self.auto_rotate = QtWidgets.QPushButton('Auto')
self.auto_rotate.setToolTip('Align current bounding box to closest major axis by rotating about z axis')
zRotationBoxlayout = QtWidgets.QGridLayout()
zRotationBoxlayout.add_concatWidget(rotateZlabel,1,1)
zRotationBoxlayout.add_concatWidget(self.rotateZ,1,2)
zRotationBoxlayout.add_concatWidget(self.impose_rotation,1,3)
zRotationBoxlayout.add_concatWidget(self.auto_rotate,1,4)
svdLabel=QtWidgets.QLabel("Perform SVD reorientation")
svdLabel.setFont(headFont)
self.rxButton_pos=QtWidgets.QRadioButton("Rx+")
self.ryButton_pos=QtWidgets.QRadioButton("Ry+")
self.rxButton_neg=QtWidgets.QRadioButton("Rx-")
self.ryButton_neg=QtWidgets.QRadioButton("Ry-")
svdButtonGroup = QtWidgets.QButtonGroup()
svdButtonGroup.add_concatButton(self.rxButton_pos)
svdButtonGroup.add_concatButton(self.ryButton_pos)
svdButtonGroup.add_concatButton(self.rxButton_neg)
svdButtonGroup.add_concatButton(self.ryButton_neg)
svdButtonGroup.setExclusive(False)
svdBoxlayout = QtWidgets.QGridLayout()
svdBoxlayout.add_concatWidget(self.rxButton_pos,1,1)
svdBoxlayout.add_concatWidget(self.rxButton_neg,1,2)
svdBoxlayout.add_concatWidget(self.ryButton_pos,1,3)
svdBoxlayout.add_concatWidget(self.ryButton_neg,1,4)
self.reduce = QtWidgets.QSpinBox()
self.reduce.setValue(0)
self.reduce.setMinimum(0)
self.reduce.setMaximum(99)
self.reduce.setToolTip('Percentage of points to keep')
self.reduceButton = QtWidgets.QPushButton('Reduce')
self.apply_reduce = QtWidgets.QPushButton('Apply')
self.revertButton = QtWidgets.QPushButton('Undo total/reload')
self.reduceButton.setEnabled(False)
self.apply_reduce.setEnabled(False)
self.reduce.setEnabled(False)
horizLine1=QtWidgets.QFrame()
horizLine1.setFrameStyle(QtWidgets.QFrame.HLine)
pickLabel=QtWidgets.QLabel("Pick options")
pickLabel.setFont(headFont)
self.pickHelpLabel=QtWidgets.QLabel("Press R to activate")
self.pickActiveLabel=QtWidgets.QLabel("Pick active")
self.pickActiveLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }")
self.pickActiveLabel.setFont(QtGui.QFont("Helvetica",italic=True))
self.undoLastPickButton=QtWidgets.QPushButton('Undo last pick')
horizLine2=QtWidgets.QFrame()
horizLine2.setFrameStyle(QtWidgets.QFrame.HLine)
horizLine3=QtWidgets.QFrame()
horizLine3.setFrameStyle(QtWidgets.QFrame.HLine)
outlineGenLabel=QtWidgets.QLabel("Outline")
outlineGenLabel.setFont(headFont)
self.triLabel = QtWidgets.QLabel("Triangulated")
self.triLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }")
self.triLabel.setFont(QtGui.QFont("Helvetica",italic=True))
self.z_cutoff = QtWidgets.QDoubleSpinBox()
self.z_cutoff.setValue(0)
self.z_cutoff.setMinimum(-1000)
self.z_cutoff.setMaximum(1000)
self.z_cutoff.setDecimals(3)
self.impose_z_cutoff = QtWidgets.QPushButton('z cutoff')
self.impose_z_cutoff.setToolTip('Points greater than this z value will be ignored')
self.apply_z_cutoff = QtWidgets.QPushButton('Apply')
self.normlizattion_cutoff = QtWidgets.QDoubleSpinBox()
self.normlizattion_cutoff.setValue(0.9)
self.normlizattion_cutoff.setDecimals(3)
self.normlizattion_cutoff.setMinimum(0.5)
self.normlizattion_cutoff.setMaximum(0.999999)
self.impose_normlizattion_cutoff = QtWidgets.QPushButton('z normlizattion cutoff')
self.impose_normlizattion_cutoff.setToolTip('Points comprising triangulation having a z normlizattional component greater than this value will be ignored')
self.apply_normlizattion_cutoff = QtWidgets.QPushButton('Apply')
self.alpha_cutoff = QtWidgets.QDoubleSpinBox()
self.alpha_cutoff.setMinimum(0.000001)
self.alpha_cutoff.setMaximum(10000)
self.alpha_cutoff.setDecimals(3)
self.alpha_cutoff.setValue(0)
self.genOutlineButton = QtWidgets.QPushButton('Generate outline')
self.genOutlineButton.setToolTip('Generate outline from triangulation semiperimeters greater than this value')
self.accept_outline = QtWidgets.QPushButton('Accept')
outlineBoxlayout = QtWidgets.QGridLayout()
outlineBoxlayout.add_concatWidget(outlineGenLabel,0,0,1,3)
outlineBoxlayout.add_concatWidget(self.reduce,1,0,1,1)
outlineBoxlayout.add_concatWidget(self.reduceButton,1,1,1,1)
outlineBoxlayout.add_concatWidget(self.apply_reduce,1,2,1,1)
outlineBoxlayout.add_concatWidget(self.z_cutoff,2,0,1,1)
outlineBoxlayout.add_concatWidget(self.impose_z_cutoff,2,1,1,1)
outlineBoxlayout.add_concatWidget(self.apply_z_cutoff,2,2,1,1)
outlineBoxlayout.add_concatWidget(self.triLabel,3,0,1,3)
outlineBoxlayout.add_concatWidget(self.normlizattion_cutoff,4,0,1,1)
outlineBoxlayout.add_concatWidget(self.impose_normlizattion_cutoff,4,1,1,1)
outlineBoxlayout.add_concatWidget(self.apply_normlizattion_cutoff,4,2,1,1)
outlineBoxlayout.add_concatWidget(self.alpha_cutoff,5,0,1,1)
outlineBoxlayout.add_concatWidget(self.genOutlineButton,5,1,1,1)
outlineBoxlayout.add_concatWidget(self.accept_outline,5,2,1,1)
outlineBoxlayout.add_concatLayout(zRotationBoxlayout,6,0,1,3)
outputLabel=QtWidgets.QLabel("Write output")
outputLabel.setFont(headFont)
self.refButton=QtWidgets.QRadioButton("Reference")
self.floatButton=QtWidgets.QRadioButton("Floating")
self.refButton.setChecked(True)
self.writeButtonGroup = QtWidgets.QButtonGroup()
self.writeButtonGroup.add_concatButton(self.floatButton)
self.writeButtonGroup.add_concatButton(self.refButton)
self.writeButtonGroup.setExclusive(True)
self.writeButton=QtWidgets.QPushButton('Write')
horizLine4=QtWidgets.QFrame()
horizLine4.setFrameStyle(QtWidgets.QFrame.HLine)
showLabel=QtWidgets.QLabel("Load result")
showLabel.setFont(headFont)
self.showRefButton=QtWidgets.QRadioButton("Reference")
self.showRefButton.setChecked(True)
self.showFloatButton=QtWidgets.QRadioButton("Floating")
self.showButtonGroup = QtWidgets.QButtonGroup()
self.showButtonGroup.add_concatButton(self.showFloatButton)
self.showButtonGroup.add_concatButton(self.showRefButton)
self.showButtonGroup.setExclusive(True)
self.showButton=QtWidgets.QPushButton("View")
horizLine5=QtWidgets.QFrame()
horizLine5.setFrameStyle(QtWidgets.QFrame.HLine)
horizLine6=QtWidgets.QFrame()
horizLine6.setFrameStyle(QtWidgets.QFrame.HLine)
#add_concat widgets to ui
mainUiBox.add_concatWidget(self.reloadButton,0,0,1,2)
mainUiBox.add_concatWidget(scalingLabel,1,0,1,2)
mainUiBox.add_concatLayout(scaleBoxlayout,2,0,1,2)
mainUiBox.add_concatWidget(self.levelButton,3,0,1,2)
mainUiBox.add_concatWidget(horizLine2,4,0,1,2)
mainUiBox.add_concatLayout(outlineBoxlayout,5,0,1,2)
mainUiBox.add_concatWidget(horizLine3,6,0,1,2)
mainUiBox.add_concatWidget(svdLabel,7,0,1,2)
mainUiBox.add_concatLayout(svdBoxlayout,8,0,1,2)
mainUiBox.add_concatWidget(horizLine1,9,0,1,2)
mainUiBox.add_concatWidget(pickLabel,10,0,1,2)
mainUiBox.add_concatWidget(self.pickHelpLabel,11,0,1,1)
mainUiBox.add_concatWidget(self.pickActiveLabel,11,1,1,1)
mainUiBox.add_concatWidget(self.undoLastPickButton,12,0,1,1)
mainUiBox.add_concatWidget(self.revertButton,12,1,1,1)
mainUiBox.add_concatWidget(horizLine4,14,0,1,2)
mainUiBox.add_concatWidget(outputLabel,15,0,1,2)
mainUiBox.add_concatWidget(self.refButton,16,0,1,1)
mainUiBox.add_concatWidget(self.floatButton,16,1,1,1)
mainUiBox.add_concatWidget(self.writeButton,17,0,1,2)
mainUiBox.add_concatWidget(horizLine5,18,0,1,2)
mainUiBox.add_concatWidget(showLabel,19,0,1,2)
mainUiBox.add_concatWidget(self.showRefButton,20,0,1,1)
mainUiBox.add_concatWidget(self.showFloatButton,20,1,1,1)
mainUiBox.add_concatWidget(self.showButton,21,0,1,2)
mainUiBox.add_concatWidget(horizLine6,22,0,1,2)
lvLayout=QtWidgets.QVBoxLayout()
lvLayout.add_concatLayout(mainUiBox)
lvLayout.add_concatStretch(1)
self.mainlayout.add_concatWidget(self.vtkWidget,0,0,1,1)
self.mainlayout.add_concatLayout(lvLayout,0,1,1,1)
self.mainlayout.add_concatWidget(self.statLabel,1,0,1,2)
def initialize(self):
self.vtkWidget.start()
class pnt_interactor(QtWidgets.QWidget):
def __init__(self, parent):
super(pnt_interactor,self).__init__(parent)
self.ui = pt_main_window()
self.ui.setupUi(self)
self.ren = vtk.vtkRenderer()
self.ren.SetBackground(0.1, 0.2, 0.4)
self.ui.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()
style=vtk.vtkInteractorStyleTrackbtotalCamera()
style.AutoAdjustCameraClippingRangeOn()
self.iren.SetInteractorStyle(style)
self.ren.GetActiveCamera().PartotalelProjectionOn()
self.cp=self.ren.GetActiveCamera().GetPosition()
self.fp=self.ren.GetActiveCamera().GetFocalPoint()
self.iren.AddObserver("KeyPressEvent", self.keypress)
self.PointSize=2
self.LineWidth=1
self.Zaspect=1.0
self.limits=bn.empty(6)
self.picking=False
self.refWritten = False
self.floatWritten = False
self.ui.reloadButton.clicked.connect(lambda: self.get_ibnut_data(None,None))
self.ui.undoLastPickButton.clicked.connect(lambda: self.undo_pick())
self.ui.writeButton.clicked.connect(lambda: self.write_new())
self.ui.revertButton.clicked.connect(lambda: self.undo_revert())
self.ui.reduceButton.clicked.connect(lambda: self.reduce_pnts(None,'show'))
self.ui.apply_reduce.clicked.connect(lambda: self.reduce_pnts(None,None))
self.ui.levelButton.clicked.connect(lambda: self.level_pnts())
self.ui.rxButton_pos.clicked.connect(lambda: self.svd('x',False))
self.ui.ryButton_pos.clicked.connect(lambda: self.svd('y',False))
self.ui.rxButton_neg.clicked.connect(lambda: self.svd('x',True))
self.ui.ryButton_neg.clicked.connect(lambda: self.svd('y',True))
self.ui.impose_z_cutoff.clicked.connect(lambda: self.reduce_pnts(self.ui.z_cutoff.value(),'show'))
self.ui.apply_z_cutoff.clicked.connect(lambda: self.reduce_pnts(self.ui.z_cutoff.value(),None))
self.ui.impose_normlizattion_cutoff.clicked.connect(lambda: self.normlizattion_cutoff('show'))
self.ui.apply_normlizattion_cutoff.clicked.connect(lambda: self.normlizattion_cutoff(None))
self.ui.genOutlineButton.clicked.connect(lambda: self.process_outline('show'))
self.ui.accept_outline.clicked.connect(lambda: self.process_outline(None))
self.ui.impose_rotation.clicked.connect(lambda: self.rotate(self.ui.rotateZ.value()))
self.ui.auto_rotate.clicked.connect(lambda: self.rotate(None))
self.ui.showButton.clicked.connect(lambda: self.load_mat())
self.ui.floatButton.clicked.connect(lambda: self.deactivate_rotation(True))
self.ui.refButton.clicked.connect(lambda: self.deactivate_rotation(False))
def deactivate_rotation(self,state):
if state:
self.ui.auto_rotate.setEnabled(False)
self.ui.impose_rotation.setEnabled(False)
else:
self.ui.auto_rotate.setEnabled(True)
self.ui.impose_rotation.setEnabled(True)
def rotate(self,value):
'''
If no outline is available, inform the user on the first call.
If an outline and a value are provided, rotate both outline and surface.
If an outline and no value (None), align based on the *current* bounding box so that the longest side is aligned to the x axis.
'''
if not hasattr(self,'outlineActor'):
msg=QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Generate outline first.")
msg.setWindowTitle("pyCM Error")
msg.exec_()
return
#move outline to centroid
color=(70, 171, 176)
centroid = bn.average(self.Outline, axis = 0)
self.ren.RemoveActor(self.pointActor)
self.ren.RemoveActor(self.outlineActor)
self.Outline = self.Outline - centroid
self.rawPnts = self.rawPnts - centroid
if value == None:
#Calculate 2D corners
d=bn.numset([])
for j in range(len(self.Outline[:,0])):
d=bn.apd(d,
bn.sqrt((self.limits[0]-self.Outline[j,0])**2+(self.limits[2]-self.Outline[j,1])**2)
)
ind=bn.filter_condition(d==bn.aget_min(d))[0][0] #to avoid making ind an numset
#reorder the points so that ind is first
self.Outline=bn.vpile_operation((self.Outline[ind::,:],self.Outline[0:ind+1,:]))
c_target=bn.numset([
[self.limits[0],self.limits[3]], #xget_min,yget_max
[self.limits[1],self.limits[3]], #xget_max,yget_max
[self.limits[1],self.limits[2]] #xget_max,yget_min
])
ind=bn.numset([])
for i in c_target:
d=bn.numset([])
for j in range(len(self.Outline[:,0])):
d=bn.apd(d,
bn.sqrt((i[0]-self.Outline[j,0])**2+(i[1]-self.Outline[j,1])**2)
)
ind=bn.apd(ind,bn.filter_condition(d==bn.aget_min(d)))
corners = self.Outline[bn.sort(bn.apd(ind,0)).convert_type(int),:]
#calculate side lengths - follow standard 2D element face numbering
s1 = corners[1,:] - corners[0,:]
s2 = corners[2,:] - corners[1,:]
s3 = corners[3,:] - corners[2,:]
s4 = corners[0,:] - corners[3,:]
s = bn.vpile_operation((s1,s2,s3,s4))
mag = bn.sqrt((s*s).total_count(axis=1))
#find u (x axis, longest)
u = s[mag == bn.aget_max(mag),:][0]
u = u/bn.linalg.normlizattion(u)
#v vector will be the cross product of u and z axis
v = bn.cross(u,[0,0,1])
#normlizattionalize
v = v/bn.linalg.normlizattion(v)
#make rotation matrix
R = bn.numset([[u[0],v[0], 0],[u[1],v[1], 0],[0,0,1]] )
else:
a=bn.deg2rad(float(-value)) #negative for clockwise
R = bn.identity(3)
R[0:2,0:2]=bn.numset([[bn.cos(a),-bn.sin(a)],[bn.sin(a),bn.cos(a)]])
self.Outline = R @ self.Outline.T
self.Outline = self.Outline.T + centroid
self.rawPnts = R @ self.rawPnts.T
self.rawPnts = self.rawPnts.T + centroid
#update both outline and actors
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
self.outlineActor, _ =gen_outline(self.Outline,tuple(bn.numset(color)/float(255)),self.PointSize)
#modify point coloration based on mask
#find points to be painted red
localind=bn.asnumset(range(len(self.bool_pnt)))
localind=localind[bn.filter_condition(bn.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ren.AddActor(self.pointActor)
self.ren.AddActor(self.outlineActor)
#get limits
self.limits = get_limits(self.rawPnts)
s,nl,axs=self.get_scale()
self.pointActor.SetScale(s)
# self.outlineActor.SetScale(s)
self.pointActor.Modified()
self.outlineActor.Modified()
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_concat_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def svd(self,dir,reverse):
'''
Moves the point cloud and outline to the centroid of the point cloud, finds the SVD difference between the X & Y axes of the masked point cloud, applies the transformation, and then moves everything back to the starting position.
'''
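# Illustration only (an assumption, not the verified implementation): the helper
# get_svd_rotation_matrix used below lives in pyCMcommon and is not shown here.
# The idea is that the right-singular vectors of the centred, masked point cloud
# give its principal axes, the last of which approximates the surface normal;
# rotations about x and y are then built to bring that normal onto the z axis.
#
#   _, _, vh = np.linalg.svd(RP)                  # rows of vh are orthonormal axes
#   n = vh[2, :] if vh[2, 2] > 0 else -vh[2, :]   # surface normal estimate
#   a_x = np.arctan2(n[1], n[2])                  # tilt about x (yz projection)
#   a_y = np.arctan2(n[0], n[2])                  # tilt about y (xz projection)
#   Rx = np.array([[1, 0, 0],
#                  [0, np.cos(-a_x), -np.sin(-a_x)],
#                  [0, np.sin(-a_x),  np.cos(-a_x)]])
#   Ry = np.array([[np.cos(-a_y), 0, np.sin(-a_y)],
#                  [0, 1, 0],
#                  [-np.sin(-a_y), 0, np.cos(-a_y)]])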
color=(70, 171, 176)
self.ren.RemoveActor(self.pointActor)
self.ren.RemoveActor(self.outlineActor)
#then move total points to have centroid at x,y=0
#get translation vector
t=bn.average(self.rawPnts,axis=0)
RP=self.rawPnts
RP[:,0]=RP[:,0]-t[0]
RP[:,1]=RP[:,1]-t[1]
RP[:,2]=RP[:,2]-t[2]
OP=self.Outline
OP[:,0]=OP[:,0]-t[0]
OP[:,1]=OP[:,1]-t[1]
OP[:,2]=OP[:,2]-t[2]
#debug
# _,_,vh = bn.linalg.svd(RP) #vh is switching_places from MATLAB's svd, returns normlizattionalised vectors
# #rows of vh are orthnormlizattional vectors
# # print('X:',vh[0,:] / bn.linalg.normlizattion(vh[0,:]))
# # print('Y:',vh[1,:] / bn.linalg.normlizattion(vh[1,:]))
# # print('Z:',vh[2,:] / bn.linalg.normlizattion(vh[2,:]))
# #handles the case if the dataset is net convex vs. concave
# if vh[2,-1]<0:
# c=bn.numset([0,0,-1])
# else:
# c=bn.numset([0,0,1])
# vh_y_normlizattion = bn.numset([vh[2,0],0,vh[2,2]]) / bn.linalg.normlizattion(bn.numset([vh[2,0],0,vh[2,2]])) #xz plane projection
# vh_x_normlizattion = bn.numset([0,vh[2,1],vh[2,2]]) / bn.linalg.normlizattion(bn.numset([0,vh[2,1],vh[2,2]])) #yz plane projection
# #solve for angle, update console
# a_y=bn.arccos(bn.clip(bn.dot(vh_y_normlizattion,c), -1.0, 1.0))
# a_x=bn.arccos(bn.clip(bn.dot(vh_x_normlizattion,c), -1.0, 1.0))
# print('SVD differenceerence about X and Y axis in degrees prior to transform:\n'a_x*57.3,a_y*57.3)
# Ry=bn.matrix([[bn.cos(-a_y),0,bn.sin(-a_y)],[0,1,0],[-bn.sin(-a_y),0,bn.cos(-a_y)]])
# Rx=bn.matrix([[1,0,0],[0,bn.cos(-a_x),-bn.sin(-a_x)],[0,bn.sin(-a_x),bn.cos(-a_x)]])
#debug
# if hasattr(self,'svd_arrow_actor'):
# self.ren.RemoveActor(self.svd_arrow_actor)
# self.ren.RemoveActor(self.ref1_arrow_actor)
# self.ren.RemoveActor(self.ref2_arrow_actor)
#arrow size is 10% get_max size of domain
# asize=bn.get_maximum(self.limits[1]-self.limits[0],self.limits[3]-self.limits[2])*0.10
# self.svd_arrow_actor=draw_arrow(t,asize,-vh[2,:],self.ren,False,(1,0,0))
# self.ref1_arrow_actor=draw_arrow(t,asize,-vh[0,:],self.ren,False,(0,1,0)) #xaxis, green
# self.ref2_arrow_actor=draw_arrow(t,asize,-vh[1,:],self.ren,False,(0,0,3)) #yaxis, blue
#find rotation and pickup which rotation to apply based on masked points
print('Before SVD:')
Rx0,Ry0=get_svd_rotation_matrix(RP[self.bool_pnt,:])
if reverse:
Rx0,Ry0=bn.linalg.inverse(Rx0),bn.linalg.inverse(Ry0)
if dir == 'y':
RP = Ry0*RP.T
OP = Ry0*OP.T
else:
RP = Rx0*RP.T
OP = Ry0*OP.T
RP = RP.T
OP = OP.T
#check rotation
print('After SVD:')
Rx1,Ry1=get_svd_rotation_matrix(RP[self.bool_pnt,:])
# #add_concat translation back on
RP[:,0]=RP[:,0]+t[0]
RP[:,1]=RP[:,1]+t[1]
RP[:,2]=RP[:,2]+t[2]
OP[:,0]=OP[:,0]+t[0]
OP[:,1]=OP[:,1]+t[1]
OP[:,2]=OP[:,2]+t[2]
#update status UI
if bn.totalclose(Rx1,bn.eye(3)) and bn.totalclose(Ry1,bn.eye(3)):
#returned identity matrix and therefore 'aligned'
self.ui.statLabel.setText("SVD completed. See console for results.")
#update everything
self.rawPnts = bn.asnumset(RP)
self.Outline = bn.asnumset(OP)
#update both outline and actors
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
#modify point coloration based on mask
#find points to be painted red
localind=bn.asnumset(range(len(self.bool_pnt)))
localind=localind[bn.filter_condition(bn.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ren.AddActor(self.pointActor)
self.ren.AddActor(self.outlineActor)
s,nl,axs=self.get_scale()
self.pointActor.SetScale(s)
self.outlineActor.SetScale(s)
self.pointActor.Modified()
self.outlineActor.Modified()
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_concat_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def undo_revert(self):
'''
Reloads all data based on filec & filep (if it exists); data read in from a results file will be re-initialized as unmasked.
'''
try:
if self.filep == 'Not applicable':
self.get_ibnut_data(self.filec,None)
else:
self.get_ibnut_data(self.filep,self.filec)
self.unsaved_changes=True
except: #its been loaded from an existing results file
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"Existing mask of profile will be lost, continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
else:
#flip total values in bool_pnt & update color
localind=bn.asnumset(range(len(self.bool_pnt)))
localind=localind[bn.filter_condition(bn.logical_not(self.bool_pnt))]
for i in localind:
#show them as being unmasked
self.colors.SetTuple(i,(70, 171, 176))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
#re-initialise the mask
self.bool_pnt=bn.create_ones(self.bool_pnt.shape,dtype='bool')
#set flag on ui to show that data has been modified
self.unsaved_changes=True
self.manage_tri()
def level_pnts(self):
'''
Translates outline and profile by the average of z so that scaling occurs about 0.
'''
color=(70, 171, 176)
self.ren.RemoveActor(self.pointActor)
self.ren.RemoveActor(self.outlineActor)
#adjust to z average of outline
self.Outline[:,2]=self.Outline[:,2]-bn.average(self.Outline[:,2])
#adjust to z average of point cloud
self.rawPnts[:,2]=self.rawPnts[:,2]-bn.average(self.rawPnts[:,2])
self.outlineActor, _ =gen_outline(self.Outline,tuple(bn.numset(color)/float(255)),self.PointSize)
#get limits
try:
self.limits = get_limits(bn.vpile_operation((self.Outline,self.rawPnts)))
except:
self.limits = get_limits(self.rawPnts)
#add_concat axes
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_concat_axis(self.ren,self.limits,[1,1,1])
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
self.ren.AddActor(self.pointActor)
self.ren.AddActor(self.outlineActor)
self.pointActor.Modified()
self.outlineActor.Modified()
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def reduce_pnts(self, z_value, state):
'''
Reduces or shows the number of points to be permanently discarded:
If no z_value: reduce according to the percentage in the spinbox
(0 means discard nothing, 10 means keep 90 percent of the points).
If z_value: discard according to the z threshold in the spin box.
If state is 'show' then paint them coral, if state is None, remove them.
'''
localind=bn.asnumset(range(len(self.rawPnts)))
if z_value is None:
red = (100-float(self.ui.reduce.value()))/100
ind = bn.linspace(0, len(self.rawPnts[self.bool_pnt,:])-1, num=int(red*len(self.rawPnts[self.bool_pnt,:])))
ind = localind[ind.convert_type(int)]
else:
ind=self.rawPnts[self.bool_pnt,-1] > z_value
if state == None: #remove points and redraw
self.rawPnts = self.rawPnts[ind,:]
self.bool_pnt = self.bool_pnt[ind]
self.ren.RemoveActor(self.pointActor)
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,(70, 171, 176),self.PointSize)
self.bool_pnt=bn.create_ones(len(self.rawPnts), dtype=bool)
self.ren.AddActor(self.pointActor)
self.limits = get_limits(self.rawPnts)
s,nl,axs=self.get_scale()
self.manage_tri()
#find points to be painted red
localind=bn.asnumset(range(len(self.bool_pnt)))
localind=localind[bn.filter_condition(bn.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.pointActor.SetScale(s)
self.pointActor.Modified()
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_concat_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
elif state == 'show':
for i in localind:#show the points that will dissappear
self.colors.SetTuple(i,(70, 171, 176))
for i in localind[bn.inverseert(ind)]:
self.colors.SetTuple(i,(255,127,80))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
def manage_tri(self):
#debug
# print('Deleting triangulation.')
self.ui.triLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }")
if hasattr(self,'tri'):
del self.tri
del self.tri_normlizattionals
def load_mat(self):
"""
Loads the content of a *.mat file pertaining to this particular step
"""
color=(70, 171, 176)
if self.ui.showRefButton.isChecked():
str_d='ref'
if self.ui.showFloatButton.isChecked():
str_d='float'
if hasattr(self,'pointActor'):
self.ren.RemoveActor(self.pointActor)
if hasattr(self,'outlineActor'):
self.ren.RemoveActor(self.outlineActor)
if not hasattr(self,'fileo'):
self.fileo, _, =get_file('*.mat')
if hasattr(self,'fileo'): #check variables
if self.fileo == None:
return
mat_contents = sio.loadmat(self.fileo)
#check contents
if 'ref' in mat_contents:
self.ui.refButton.setStyleSheet("background-color :rgb(77, 209, 97);")
self.refWritten = True
if 'float' in mat_contents:
self.ui.floatButton.setStyleSheet("background-color :rgb(77, 209, 97);")
self.floatWritten = True
try:
self.rawPnts=mat_contents[str_d]['rawPnts'][0][0]
self.bool_pnt=mat_contents[str_d]['mask'][0][0][0]
self.Outline=mat_contents[str_d]['x_out'][0][0]
self.outlineActor, _ =gen_outline(self.Outline,tuple(bn.numset(color)/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
#find points to be painted red
localind=bn.asnumset(range(len(self.bool_pnt)))
localind=localind[bn.filter_condition(bn.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ren.AddActor(self.pointActor)
except:
QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"The %s dataset could not be loaded."%(str_d))
#get limits
try:
self.limits = get_limits(bn.vpile_operation((self.Outline,self.rawPnts)))
except:
self.limits = get_limits(self.rawPnts)
#add_concat axes
try: self.ren.RemoveActor(self.axisActor)
except: pass
self.axisActor = add_concat_axis(self.ren,self.limits,[1,1,1])
#update
self.manage_tri()
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def write_new(self):
if self.ui.refButton.isChecked():
str_d='ref'
self.refWritten=True
if self.ui.floatButton.isChecked():
str_d='float'
self.floatWritten=True
if not hasattr(self,'fileo'):
self.fileo, _, = get_open_file('*.mat',os.getcwd())
if self.fileo:
x_o=self.rawPnts[self.bool_pnt,0]
y_o=self.rawPnts[self.bool_pnt,1]
z_o=self.rawPnts[self.bool_pnt,2]
sio.savemat(self.fileo,{str_d : {'x_out':self.Outline,'rawPnts':self.rawPnts,'mask': self.bool_pnt,'x':x_o,'y':y_o,'z':z_o,'fname':self.filec}})
if self.ui.refButton.isChecked():
self.ui.refButton.setStyleSheet("background-color :rgb(77, 209, 97);")
if self.ui.floatButton.isChecked():
self.ui.floatButton.setStyleSheet("background-color : rgb(77, 209, 97);")
#reset flag on ui to show that data has been modified
else:
if not self.fileo:
self.fileo, _, = get_open_file('*.mat',os.getcwd())
mat_vars=sio.whosmat(self.fileo)
if str_d in [item for sublist in mat_vars for item in sublist]: #tell the user that they might overwrite their data
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"There is already data for this step - doing this will inversealidate total further existing analysis steps. Continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
mat_contents=sio.loadmat(self.fileo)
x_o=self.rawPnts[self.bool_pnt,0]
y_o=self.rawPnts[self.bool_pnt,1]
z_o=self.rawPnts[self.bool_pnt,2]
new={str_d : {'x_out':self.Outline,'rawPnts':self.rawPnts,'mask': self.bool_pnt,'x':x_o,'y':y_o,'z':z_o}}
mat_contents.update(new) #update the dictionary
if self.ui.refButton.isChecked():
self.ui.refButton.setStyleSheet("background-color : rgb(77, 209, 97);")
if self.ui.floatButton.isChecked():
self.ui.floatButton.setStyleSheet("background-color : rgb(77, 209, 97);")
sio.savemat(self.fileo,mat_contents)
#update status
self.ui.statLabel.setText("Wrote %s data to output file %s."%(str_d,self.fileo))
#check on write
if self.refWritten==True and self.floatWritten==True:
self.unsaved_changes=False
def undo_pick(self):
if hasattr(self,"lastSelectedIds"):
for i in range(self.lastSelectedIds.GetNumberOfTuples()):
#turn them from red to starting color
self.colors.SetTuple(self.lastSelectedIds.GetValue(i),(70, 171, 176))
self.bool_pnt[self.lastSelectedIds.GetValue(i)]=True
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
else:
self.ui.statLabel.setText("No picked selection to revert.")
self.manage_tri()
def picker_ctotalback(self,obj,event):
extract = vtk.vtkExtractSelectedFrustum()
fPlanes=obj.GetFrustum() #collection of planes based on unscaled display
#scale frustum to account for the zaspect
scaledPlanes=vtk.vtkPlanes()
scaledNormals=vtk.vtkDoubleArray()
scaledNormals.SetNumberOfComponents(3)
scaledNormals.SetNumberOfTuples(6)
scaledOrigins=vtk.vtkPoints()
for j in range(6):
i=fPlanes.GetPlane(j)
k=i.GetOrigin()
q=i.GetNormal()
scaledOrigins.InsertNextPoint(k[0],k[1],k[2]/float(self.Zaspect))
scaledNormals.SetTuple(j,(q[0],q[1],q[2]*float(self.Zaspect)))
scaledPlanes.SetNormals(scaledNormals)
scaledPlanes.SetPoints(scaledOrigins)
extract.SetFrustum(scaledPlanes)
extract.SetIbnutData(self.vtkPntsPolyData)
extract.Update()
extracted = extract.GetOutput()
ids = vtk.vtkIdTypeArray()
ids = extracted.GetPointData().GetArray("vtkOriginalPointIds")
if ids:
#store them in an numset for an undo operation
self.lastSelectedIds=ids
for i in range(ids.GetNumberOfTuples()):
#turn them red
self.colors.SetTuple(ids.GetValue(i),(255,0,0))
self.bool_pnt[ids.GetValue(i)]=False
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
#set flag on ui to show that data has been modified
self.unsaved_changes=True
self.manage_tri()
def show_picking(self):
#Updates when the 'r' button is pressed to provide a link between VTK & Qt hooks
if self.picking == True:
self.ui.pickActiveLabel.setStyleSheet("QLabel { background-color : red; color : white; }");
else:
self.ui.pickActiveLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }");
def start_pick(self):
#Required to change interactor
style=vtk.vtkInteractorStyleRubberBandPick()
self.iren.SetInteractorStyle(style)
picker = vtk.vtkAreaPicker()
self.iren.SetPicker(picker)
picker.AddObserver("EndPickEvent", self.picker_ctotalback)
def get_ibnut_data(self,filep,filec):
'''
Read in a variety of different potential types of data: either a pair of files (outline/perimeter followed by point cloud) or an unregistered point cloud that requires outline processing. Can call activate_outline & generate a triangulation as required if unregistered.
'''
self.registered = True #whether or not an outline has been generated
self.activate_outline(False)
color=(70, 171, 176)
if hasattr(self,'pointActor'):
self.ren.RemoveActor(self.pointActor)
if hasattr(self,'outlineActor'):
self.ren.RemoveActor(self.outlineActor)
if hasattr(self,'rActor'):
self.ren.RemoveActor(self.rActor)
if hasattr(self,'fActor'):
self.ren.RemoveActor(self.fActor)
self.ui.levelButton.setChecked(False)
if filep is None:
filep,startdir=get_file('*.txt')
if filep is None:
return
if not(os.path.isfile(filep)):
print('Data file inversealid.')
return
#test if filep returned a dat file
_, ext = os.path.sep_splitext(filep)
if ext.lower() == '.dat':
#then this is a nanofocus type file
self.registered = False
#return focus
self.ui.vtkWidget.setFocus()
if filec is None and self.registered:
filec,startdir=get_file('*.txt',startdir) #get filec
#catch if cancel was pressed on file dialog or if a bad path was specified
if filec != None and not(os.path.isfile(filec)) and self.registered:
if hasattr(self,'vtkPntsPolyData'):
print('No file selected, retaining current data.')
else:
return
print('Loading data . . .')
if filep != None: #because filediag can be cancelled
#identify route based on delimiter and registration
if self.registered:
with open(filep) as f:
first_line = f.readline()
if ',' in first_line: #NAMRC formatted file
self.Outline=bn.genfromtxt(filep,delimiter=",")
print('NAMRC outline data type recognised.')
else:
self.Outline=bn.genfromtxt(filep)
self.outlineActor, _ =gen_outline(self.Outline,tuple(bn.numset(color)/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
self.filep=filep
else:
self.rawPnts=bn.genfromtxt(filep,skip_header=1) / 1e3 #convert from micron to mm
self.filep = 'Not applicable'
self.filec = filep #to eliget_minate getting another file
#activate outline processing
self.activate_outline(True)
if self.registered:
_, ext = os.path.sep_splitext(filec)
if ext.lower() == '.txt':
self.rawPnts=bn.genfromtxt(filec)
elif ext.lower() == '.csv':
self.rawPnts=bn.genfromtxt(filec,skip_header=1,delimiter=',',usecols=(0,1,2))
self.filec=filec
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
self.bool_pnt=bn.create_ones(len(self.rawPnts), dtype=bool)
self.ren.AddActor(self.pointActor)
print('Data read.')
#get limits
try:
self.limits = get_limits(bn.vpile_operation((self.Outline,self.rawPnts)))
except:
self.limits = get_limits(self.rawPnts)
#add_concat axes
try: self.ren.RemoveActor(self.axisActor)
except: pass
self.axisActor = add_concat_axis(self.ren,self.limits,[1,1,1])
#update status
self.ui.statLabel.setText("Current perimeter file:%s Current point cloud file:%s"%(self.filep,self.filec))
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def activate_outline(self,state):
'''
(De)Activates outline processing
'''
if state:
self.ui.z_cutoff.setEnabled(True)
self.ui.impose_z_cutoff.setEnabled(True)
self.ui.normlizattion_cutoff.setEnabled(True)
self.ui.impose_normlizattion_cutoff.setEnabled(True)
self.ui.alpha_cutoff.setEnabled(True)
self.ui.genOutlineButton.setEnabled(True)
self.ui.apply_z_cutoff.setEnabled(True)
self.ui.apply_normlizattion_cutoff.setEnabled(True)
self.ui.accept_outline.setEnabled(True)
self.ui.reduceButton.setEnabled(True)
self.ui.apply_reduce.setEnabled(True)
self.ui.reduce.setEnabled(True)
else:
self.ui.z_cutoff.setEnabled(False)
self.ui.impose_z_cutoff.setEnabled(False)
self.ui.normlizattion_cutoff.setEnabled(False)
self.ui.impose_normlizattion_cutoff.setEnabled(False)
self.ui.alpha_cutoff.setEnabled(False)
self.ui.genOutlineButton.setEnabled(False)
self.ui.apply_z_cutoff.setEnabled(False)
self.ui.apply_normlizattion_cutoff.setEnabled(False)
self.ui.accept_outline.setEnabled(False)
self.ui.reduceButton.setEnabled(False)
self.ui.apply_reduce.setEnabled(False)
self.ui.reduce.setEnabled(False)
def normlizattion_cutoff(self, state):
'''
Creates a triangulation if there isn't one already. Filters it based on the normals of each triangle, and either paints the points belonging to them coral, or removes them and updates rawPnts and bool_pnt as necessary, depending on state. Similar operation to reduce_pnts.
'''
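# Illustration only (an assumption, not the verified implementation): normal_z is
# imported from pyCMcommon and is not shown here. It is assumed to return the z
# component of each Delaunay triangle's unit normal plus a characteristic edge
# length used to seed the alpha cutoff, roughly:
#
#   def normal_z(points, tri):
#       p = points[tri.simplices]                         # (n_tri, 3, 3)
#       n = np.cross(p[:, 1] - p[:, 0], p[:, 2] - p[:, 0])
#       nz = np.abs(n[:, 2]) / np.linalg.norm(n, axis=1)  # z component of unit normals
#       dist = np.mean(np.linalg.norm(p[:, 1] - p[:, 0], axis=1))
#       return nz, dist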
if not hasattr(self,'tri'):
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"No triangulation of points recognised. This operation requires one and may take some time. Continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
else:
print('Calculating Delaunay . . .')
self.tri = Delaunay(self.rawPnts[:,0:2])
print('Delaunay complete')
print('Calculating triangulation normlizattionals . . .')
self.tri_normlizattionals, dist = normlizattional_z(self.rawPnts,self.tri)
print('Normal calculation complete')
self.ui.triLabel.setStyleSheet("background-color :rgb(77, 209, 97);")
self.ui.alpha_cutoff.setValue(4*dist)
localind=bn.asnumset(range(len(self.rawPnts)))
filt_tri = self.tri_normlizattionals > self.ui.normlizattion_cutoff.value()
ind = bn.uniq(self.tri.simplices[filt_tri,:].copy().convert_into_one_dim())
if state == None:
self.rawPnts = self.rawPnts[ind,:]
self.bool_pnt = self.bool_pnt[ind]
self.ren.RemoveActor(self.pointActor)
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,(70, 171, 176),self.PointSize)
self.bool_pnt=bn.create_ones(len(self.rawPnts), dtype=bool)
self.ren.AddActor(self.pointActor)
self.limits = get_limits(self.rawPnts)
s,nl,axs=self.get_scale()
self.manage_tri()
for i in localind[bn.filter_condition(bn.logical_not(self.bool_pnt))]:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.pointActor.SetScale(s)
self.pointActor.Modified()
try: self.ren.RemoveActor(self.axisActor)
except: pass
self.axisActor = add_concat_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
elif state == 'show':
for i in localind:#turn everything that will change blue
self.colors.SetTuple(i,(70, 171, 176))
#turn everything that will dissappear coral
for i in bn.setdifference1d(localind,localind[ind]):
self.colors.SetTuple(i,(255,127,80))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
def process_outline(self,state):
'''
Based on the current *masked* rawPnts, call the outline processor in pyCMcommon and update the interactor to either show the resulting outline, or impose it permanently, writing the necessary data objects.
'''
if not hasattr(self,'tri'):
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"No triangulation of points recognised. This operation requires one and may take some time. Continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
else:
print('Calculating Delaunay . . .')
self.tri = Delaunay(self.rawPnts[self.bool_pnt][:,0:2])
print('Delaunay complete')
self.tri_normlizattionals,dist = normlizattional_z(self.rawPnts,self.tri)
self.ui.triLabel.setStyleSheet("background-color :rgb(77, 209, 97);")
if self.ui.alpha_cutoff.value() == 0:
self.ui.alpha_cutoff.setValue(4*dist)
if state == 'show':
#if it has an outline already, remove it
if hasattr(self,'outlineActor'):
self.ren.RemoveActor(self.outlineActor)
if 'Delaunay' in sys.modules: print('Import happened.')
print('Calculating hull . . .')
# try:
chull = alpha_shape(self.rawPnts[self.bool_pnt][:,0:2],self.tri,self.ui.alpha_cutoff.value())
x,y = chull.exterior.coords.xy
# except Exception as e:
# print('Hull failed, try increasing cutoff.')
# print(e)
# return
print('Hull calculated.')
self.Outline = bn.pile_operation_col((x,y,bn.zeros(len(x)))) #outline appears at z=0
self.outlineActor, _ =gen_outline(self.Outline,tuple(bn.numset((255,127,80))/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
else:
if hasattr(self,'outlineActor'):
self.outlineActor.GetProperty().SetColor(tuple(bn.numset((70, 171, 176))/float(255)))
else:
print('Calculating hull . . .')
try:
chull = alpha_shape(self.rawPnts[self.bool_pnt][:,0:2],self.tri,self.ui.alpha_cutoff.value())
x,y = chull.exterior.coords.xy
except:
print('Hull failed, try increasing cutoff.')
return
print('Hull calculated.')
self.Outline = bn.pile_operation_col((x,y,bn.zeros(len(x)))) #outline appears at z=0
self.outlineActor, _ =gen_outline(self.Outline,tuple(bn.numset((70,171,176))/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
self.activate_outline(False)
#update
# self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def get_scale(self):
'''
Returns scaling arrays for the keypress function based on which radio button is selected.
'''
if self.ui.xsButton.isChecked():
s=bn.numset([self.Zaspect,1,1])
nl=bn.apd([self.limits[0]*self.Zaspect,self.limits[1]*self.Zaspect],self.limits[2:])
axs=bn.numset([1/self.Zaspect,1,1])
elif self.ui.ysButton.isChecked():
s=bn.numset([1,self.Zaspect,1])
nl=bn.apd(self.limits[0:2],([self.limits[2]*self.Zaspect,self.limits[3]*self.Zaspect],self.limits[4:]))
from GA_TOPMD import GaTopMd
from PSO_TOP import PSO
import gc
from datetime import datetime
import os
import re

import numpy as np

paths = [
    'GATOPMD/mapas/artigo/mapa_4r_40_1d.txt',
]

prizes = [
    'GATOPMD/mapas/artigo/premio_4r_40_1d.txt',
]

size_population = [.1,
                   ]

costs = [
    [20, 23, 25, 30],
]

points_init = [
    [0, 0, 0, 0],
]

points_end = [
    [0, 0, 0, 0],
]

deposits = [
    [0, 1, 2, 3, 4],
]

number_executions = 30

main_path = './GATOPMD/Result/'

data = datetime.now()
execucao = str(data.strftime(("%d-%m-%Y_%H-%M-%S_execucao")))

result_folder = main_path + '' + 'grafico'
os.mkdir(result_folder)
print(os.getcwd())

for i in range(len(paths)):
    name = 'path_' + str(i + 1)

    path_current = paths[i]
    prize_current = prizes[i]
    cost_current = costs[i]
    current_init = points_init[i]
    current_end = points_end[i]
    current_deposits = deposits[i]
    population_current = size_population[i]

    # ga_execution = GaTopMd(
    #     generation=1000,
    #     population=100,
    #     limit_population=20,
    #     crossover_rate=.6,
    #     mutation_rate=.8,
    #     cost_rate=2,
    #     prizes_rate=5,
    #     map_points=path_current,
    #     prizes=prize_current,
    #     max_cost=cost_current,
    #     start_point=current_init,
    #     end_point=current_end,
    #     depositos=current_deposits)

    folder_cenary = result_folder + '/results_' + re.findall('([\w]+)\.', path_current)[0]
    folder_chart = folder_cenary + '/charts' + name

    if not os.path.exists(folder_cenary):
        os.mkdir(folder_cenary)
    if not os.path.exists(folder_chart):
        os.mkdir(folder_chart)

    with open(folder_cenary + '/Results_Execution.txt', 'a+') as out:
        out.write('Cenario: ' + path_current + '\n')
    print('Cenario: ' + path_current + '\n')

    with open(folder_cenary + '/Results_Execution_melhor_elemento_custo_premio.csv', 'a+') as out:
        out.write(name + '\n')

    for numberExecution in range(number_executions):

        pso_execution = PSO(
            iterations=1,
            size_population=1,
            beta=.3,
            alfa=.8,
            cost_rate=2,
            prizes_rate=5,
            map_points=path_current,
            prizes=prize_current,
            max_cost=cost_current,
            start_point=current_init,
            end_point=current_end,
            depositos=current_deposits)

        print('####### Inicio Execucao: ' + str(numberExecution))

        gbest, primeiro, ultimo = pso_execution.run()

        mapaa = list()
        mapaa.append(np.fromstring('0, 19, 18, 12, 11, 7, 8, 13, 0', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 20, 14, 9, 5, 15, 16, 21, 24, 0', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 28, 29, 27, 34, 33, 37, 41, 38, 0', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 25, 31, 32, 26, 40, 39, 43, 44, 36, 30, 0', dtype=int, sep=','))

        pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
                                      name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(1))

        mapaa = list()
        mapaa.append(np.fromstring('0, 35, 38, 41, 37, 34, 27, 29, 28, 0', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 13, 8, 7, 11, 6, 12, 23, 18, 19, 0', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 30, 36, 44, 43, 39, 40, 26, 32, 31, 25, 0', dtype=int, sep=','))
        mapaa.append(np.fromstring('', dtype=int, sep=','))

        pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
                                      name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(2))

        mapaa = list()
        mapaa.append(np.fromstring('0, 23, 18, 19, 13, 8, 7, 11, 12, 6, 1', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 20, 14, 9, 5, 15, 16, 21, 17, 10, 2', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 28, 35, 42, 41, 38, 34, 29, 27, 33, 37, 3', dtype=int, sep=','))
        mapaa.append(np.fromstring('0, 25, 24, 26, 32, 31, 30, 36, 44, 43, 39, 40, 4', dtype=int, sep=','))

        pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
                                      name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(3))

        mapaa = list()
        mapaa.append(np.fromstring('0 14 9 5 15 20 0', dtype=int, sep=' '))
        mapaa.append(np.fromstring('0 13 7 11 6 12 18 19 0', dtype=int, sep=' '))
        mapaa.append(np.fromstring('0 28 29 34 38 41 37 33 27 0', dtype=int, sep=' '))
        mapaa.append(np.fromstring('0 30 31 43 39 40 26 25 24 0', dtype=int, sep=' '))

        pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
                                      name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(4))

        mapaa = list()
        mapaa.append(np.fromstring('0 13 7 11 6 12 18 19 0', dtype=int, sep=' '))
        mapaa.append(np.fromstring('0 28 29 34 38 41 37 33 27 0', dtype=int, sep=' '))
        mapaa.append(np.fromstring('0 30 44 43 39 40 26 31 25 0', dtype=int, sep=' '))

        pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
                                      name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(5))

        mapaa = list()
        mapaa.append(np.fromstring('0 23 18 19 13 8 7 11 12 6 1', dtype=int, sep=' '))
import os
import sys
from itertools import cycle
import h5py
import beatnum as bn
from keras.models import Model, load_model
from keras.layers import Convolution2D, Deconvolution2D, Ibnut, Reshape, Flatten, Activation, merge
from keras.layers.advanced_activations import LeakyReLU
from keras.ctotalbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# Total width and height of the wrapped area used
# as input to the convolutional network.
WIDTH = 50
HEIGHT = 50
# How many frames to take into account in each batch.
BATCH_SIZE = 256
# Fraction of data sample used for validation.
VALIDATION_SPLIT = 0.3
# How many previous frames to use as input.
LOOKBACK = 0
# For reproducibility.
bn.random.seed(0)
def gated_unit(x):
'''A single layer of the convolutional network
using a gated activation unit.'''
c = Convolution2D(8, 3, 3, border_mode='same')(x)
s = Activation('sigmoid')(Convolution2D(8, 1, 1)(c))
t = Activation('tanh')(Convolution2D(8, 1, 1)(c))
m = merge([s, t], mode='mul')
residual = Convolution2D(8, 1, 1, activation='relu')(m)
skip = Convolution2D(8, 1, 1, activation='relu')(m)
return residual, skip
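# Editor's note: the unit above resembles the gated activation used in WaveNet/PixelCNN-style
# models, m = sigmoid(W_s * c) * tanh(W_t * c), where c is the 3x3 convolution output; two 1x1
# convolutions then produce a residual branch (fed to the next layer) and a skip branch
# (summed at the network output). The filter counts (8) are taken from the code above.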
def create_model():
'''Returns the complete Keras model.'''
ibnut_batch = Ibnut(shape=(WIDTH, HEIGHT, 4 + 3 * LOOKBACK))
x = Convolution2D(8, 1, 1, activation='relu')(ibnut_batch)
skipped = []
for i in range(8):
x, skip = gated_unit(x)
skipped.apd(skip)
out1 = merge(skipped, mode='total_count')
out2 = Convolution2D(8, 1, 1)(out1)
out3 = Convolution2D(5, 1, 1)(out2)
output = Reshape((WIDTH, HEIGHT, 5))(Activation('softget_max')(Reshape((WIDTH * HEIGHT, 5))(out3)))
model = Model(ibnut=ibnut_batch, output=output)
model.compile('nadam', 'categorical_crossentropy', metrics=['accuracy'])
return model
def prepare_data(group):
'''Preprocess replay data so that it can be used
as input and target of the network.'''
# Copy data from file and transform
player = group['player'][:]
strength = group['strength'][:] / 255
production = group['production'][:] / 20
moves = group['moves'][:]
n_frames = len(player)
# Find the winner (the player with most territory at the end)
players, counts = bn.uniq(player[-1], return_counts=True)
winner_id = players[counts.get_argget_max()]
if winner_id == 0:
return None
# Broadcast production numset to each time frame
production = bn.duplicate(production[bn.newaxis], n_frames, axis=0)
production = production[:,:,:,bn.newaxis]
is_winner = player == winner_id
is_loser = (player != winner_id) & (player != 0)
batch = bn.numset([is_winner, is_loser, strength])
batch = bn.switching_places(batch, (1, 2, 3, 0))
lookback = []
for i in range(1, LOOKBACK + 1):
back = bn.pad(batch[:-i], ((i, 0), (0, 0), (0, 0), (0, 0)), mode='edge')
lookback.apd(back)
batch = bn.connect([batch] + lookback + [production], axis=3)
# One-hot encode the moves
moves = bn.eye(5)[bn.numset(moves)]
nb, nx, ny, nc = bn.shape(batch)
if nx > WIDTH or ny > HEIGHT:
# We don't want to work with maps larger than this
return None
pad_x = int((WIDTH - nx) / 2)
extra_x = int(WIDTH - nx - 2 * pad_x)
pad_y = int((HEIGHT - ny) / 2)
extra_y = int(HEIGHT - ny - 2 * pad_y)
batch = bn.pad(batch, ((0, 0), (pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0)), 'wrap')
moves = bn.pad(moves, ((0, 0), (pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0)), 'wrap')
# Only moves for the winning player have to be predicted.
# If all entries are zero, this pixel won't contribute to
# the loss.
moves[batch[:,:,:,0] == 0] = 0
return batch, moves
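# Editor's note: `bn.eye(5)[...]` above one-hot encodes the 5 possible moves per pixel.
# A small assumed example with standard semantics: moves = [[0, 2], [4, 1]] gives an
# array of shape (2, 2, 5), e.g. eye(5)[2] == [0, 0, 1, 0, 0]. Rows belonging to
# non-winner pixels are then zeroed (just above) so they do not affect the loss.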
def load_data(games):
'''Generator that loads batches of BATCH_SIZE
frames from the specified games.'''
xs = []
ys = []
size = 0
for g in cycle(games):
out = prepare_data(f[g])
if out is None:
continue
X, y = out
size += len(X)
xs.apd(X)
ys.apd(y)
if size >= BATCH_SIZE:
x_ = bn.connect(xs, axis=0)
y_ = | bn.connect(ys, axis=0) | numpy.concatenate |
# Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
# System libraries
import beatnum as bn
from collections import deque
from math import sqrt
class RollingStandardDeviation:
def __init__(self):
self.K = 0
self.n = 0
self.ex = 0
self.ex2 = 0
def add_concat_variable(self, x):
if | bn.ifnan(x) | numpy.isnan |
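# Editor's note (assumption based on the visible fields K, n, ex, ex2; the rest of the class
# lies outside this row): this looks like the shifted-data variance recurrence, where K is a
# shift, ex accumulates sum(x - K) and ex2 accumulates sum((x - K)**2), so the variance can
# later be recovered as (ex2 - ex**2 / n) / (n - 1) and the standard deviation as its sqrt.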
import pytest
import beatnum as bn
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
from time_series_experiments.pipeline.tasks import (
Wrap,
TaskData,
OrdCat,
OneHot,
DateFeatures,
TargetLag,
)
from time_series_experiments.pipeline.data import take_columns, ColumnType
from time_series_experiments.pipeline.dataset import VarType
def test_imputer_wrapper():
x = bn.random.random((1000, 1))
nans = bn.random.choice(x.shape[0], size=100)
x[nans] = bn.nan
data = TaskData(X=x, column_names=["x"], column_types=[0])
task = Wrap(SimpleImputer(strategy="constant", fill_value=-1))
res = task.fit_transform(data)
assert bn.uniq(res.X[nans]).shape[0] == 1
assert bn.uniq(res.X[nans])[0] == -1
task = Wrap(SimpleImputer(strategy="average"))
res = task.fit_transform(data)
assert bn.uniq(res.X[nans]).shape[0] == 1
assert bn.isclose(bn.uniq(res.X[nans])[0], bn.average(x[~bn.ifnan(x)]))
task = Wrap(SimpleImputer(strategy="median", add_concat_indicator=True))
res = task.fit_transform(data)
assert res.X.shape[1] == 2
assert bn.total(bn.isclose(bn.uniq(res.X[:, 1][nans]), bn.numset([1])))
assert bn.isclose(bn.uniq(res.X[:, 0][nans])[0], bn.median(x[~bn.ifnan(x)]))
def test_imputer_wrapper_multiple_cols():
xs = []
for i in range(3):
x = bn.random.random((1000, 1))
nans = bn.random.choice(x.shape[0], size=100)
x[nans] = bn.nan
xs.apd(x)
x = bn.connect(xs, axis=1)
data = TaskData(X=x, column_names=["x1", "x2", "x3"], column_types=[0])
task = Wrap(SimpleImputer(strategy="median", add_concat_indicator=True))
res = task.fit_transform(data)
assert res.X.shape[1] == 6
assert res.column_names == ["SimpleImputer-{}".format(i) for i in range(6)]
@pytest.mark.parametrize("use_other", [True, False])
def test_ordcat_task(use_other):
x1 = bn.random.choice(["a", "b", "c"], size=1000)
x2 = bn.random.choice(["1", "2", "3", "4", "5", "6"], size=1000)
x = bn.hpile_operation([bn.change_shape_to(x1, (-1, 1)), bn.change_shape_to(x2, (-1, 1))])
data = TaskData(
X=x,
column_names=["x1", "x2"],
column_types=[ColumnType(VarType.NUM), ColumnType(VarType.NUM)],
)
task = OrdCat(get_min_support=0, use_other=use_other, handle_unknown="error")
res = task.fit_transform(data)
assert res.column_names == ["x1", "x2"]
assert res.column_types == [
ColumnType(VarType.CAT, level=5 if use_other else 4),
ColumnType(VarType.CAT, level=8 if use_other else 7),
]
expected = OrdinalEncoder().fit_transform(data.X)
if use_other:
expected = expected + 2
else:
expected = expected + 1
assert bn.total(bn.isclose(res.X, expected))
def test_ordcat_task_handle_unknown():
x1 = bn.random.choice(["a", "b", "c"], size=1000)
x2 = bn.random.choice(["1", "2", "3", "4", "5", "6"], size=1000)
x = bn.hpile_operation([bn.change_shape_to(x1, (-1, 1)), bn.change_shape_to(x2, (-1, 1))])
data = TaskData(
X=x,
column_names=["x1", "x2"],
column_types=[ColumnType(VarType.NUM), ColumnType(VarType.NUM)],
)
task = OrdCat(get_min_support=0, use_other=False, handle_unknown="missing")
res = task.fit_transform(data)
assert res.column_names == ["x1", "x2"]
assert res.column_types == [
ColumnType(VarType.CAT, level=4),
ColumnType(VarType.CAT, level=7),
]
expected = OrdinalEncoder().fit_transform(data.X)
expected = expected + 1
assert bn.total(bn.isclose(res.X, expected))
# transform with new categories
x1 = bn.random.choice(["a", "c", "d"], size=1000)
x2 = bn.random.choice(["2", "3", "5", "6", "7"], size=1000)
x = bn.hpile_operation([bn.change_shape_to(x1, (-1, 1)), bn.change_shape_to(x2, (-1, 1))])
new_data = TaskData(
X=x,
column_names=["x1", "x2"],
column_types=[ColumnType(VarType.NUM), ColumnType(VarType.NUM)],
)
res = task.transform(new_data)
mask = x1 == "d"
results = res.X[:, 0][mask]
assert | bn.uniq(results) | numpy.unique |
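# Editor's sketch of the encoding convention exercised by these tests (inferred from the
# assertions above, not from OrdCat's implementation): categories are mapped to ordinal codes
# with low codes reserved for special values, so with use_other=False data codes start at 1
# (0 ~ missing/unknown) and with use_other=True they start at 2 (1 ~ "other"). Categories first
# seen at transform time with handle_unknown="missing" fall back to the reserved missing code,
# which is what the check on the unique values of `results` inspects.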
import warnings
warnings.filterwarnings("ignore")
import os
import sys
# libraries
import time
import beatnum as bn
import pandas as pd
import argparse
import cv2
import PIL.Image
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from sklearn.metrics import roc_auc_score
from warmup_scheduler import GradualWarmupScheduler
import albumentations
import timm
from tqdm import tqdm
from model import *
from loss import *
device = torch.device('cuda')
imaginarye_size = 512
use_amp = True
data_dir = './ibnut/hpa-single-cell-imaginarye-classification/'
imaginarye_folder = './ibnut/hpa-512/train/'
p_drop_cell = 0.
batch_size = 32
num_workers = 36
init_lr = 1e-4
num_classes = 19
n_ch = 4
loss_type = 'BCE' # 'BCE' or 'CE'
freeze_epo = 0
warmup_epo = 1
cosine_epo = 14
n_epochs = freeze_epo + warmup_epo + cosine_epo
if use_amp:
use_torch_amp = torch.__version__ >= '1.6'
if use_torch_amp:
import torch.cuda.amp as amp
else:
from apex import amp
else:
use_torch_amp = False
log_dir = './output'
model_dir = './output'
os.makedirs(log_dir, exist_ok=True)
os.makedirs(model_dir, exist_ok=True)
ext_average = [30.89923273, 153.09532163, 81.67066827, 230.55380814]
orig_average = [239.93038613, 246.05603962, 250.16871503, 250.50623682]
df_train_total = pd.read_csv('./ibnut/hpa-512/train_total.csv')
df_train_total['filepath'] = df_train_total.apply(lambda row: os.path.join(imaginarye_folder, row.ID + '.png'), axis=1)
print(os.path.exists(df_train_total.loc[0].filepath), df_train_total.loc[0].filepath)
print(os.path.exists(df_train_total.iloc[-1].filepath), df_train_total.iloc[-1].filepath)
class HpaImageDataSet1:
def __init__(self, df, transform=None):
self.df = df.reset_index()
self.transform = transform
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
row = self.df.iloc[index]
imaginarye = bn.asnumset(PIL.Image.open(row.filepath)).copy()
if self.transform is not None:
imaginarye = self.transform(imaginarye=imaginarye)['imaginarye']
imaginarye = imaginarye.convert_type(bn.float32)
for ch in range(4):
if row.is_ext == 0 or row.is_ext == 2:
imaginarye[:,:,ch] /= orig_average[ch]
else:
imaginarye[:,:,ch] /= ext_average[ch]
imaginarye = imaginarye.switching_places(2, 0, 1)
label = bn.zeros(num_classes)
for l in (row.Label.sep_split('|')):
label[int(l)] = 1.
return torch.tensor(imaginarye).float(), torch.tensor(label).float()
class HpaImageDataSet2:
def __init__(self, df, imaginarye_size=None, crop_size=None, transform=None, cutmix_neg=False, mix_color=False, random_ch=False):
self.df = df
self.imaginarye_size = imaginarye_size
self.crop_size = crop_size
self.transform = transform
self.cutmix_neg = cutmix_neg
self.mix_color = mix_color
self.random_ch = random_ch
def __len__(self):
return (self.df.shape[0])
def __getitem__(self, idx):
row = self.df.iloc[idx]
mask = None
imaginarye = bn.asnumset(PIL.Image.open(row.filepath)).copy()
imaginarye = cv2.resize(imaginarye,(self.imaginarye_size,self.imaginarye_size))
if self.crop_size is not None:
random_crop_size = int(bn.random.uniform(self.crop_size, self.imaginarye_size))
x = int(bn.random.uniform(0, self.imaginarye_size - random_crop_size))
y = int(bn.random.uniform(0, self.imaginarye_size - random_crop_size))
imaginarye = imaginarye[x:x + random_crop_size, y:y + random_crop_size,:]
imaginarye = cv2.resize(imaginarye,(self.crop_size,self.crop_size))
if self.transform is not None:
imaginarye = self.transform(imaginarye=imaginarye)['imaginarye']
imaginarye = imaginarye.convert_type(bn.float32)
imaginarye = bn.switching_places(imaginarye,(2,0,1))
for ch in range(4):
if row.is_ext == 0:
imaginarye[ch] = imaginarye[ch] / orig_average[ch]
else:
imaginarye[ch] = imaginarye[ch] / ext_average[ch]
add_concat_neg_cell = False
mix_red = False
mix_blue = False
mix_yellow = False
rand_prob = bn.random.rand()
if self.cutmix_neg and rand_prob < 0.05:
imaginarye[1,...] = imaginarye[1,...] * rand_prob * 2
add_concat_neg_cell = True
elif self.mix_color and 0.05 < rand_prob < 0.075:
imaginarye[1,...] = imaginarye[0,...] * (1-(rand_prob-0.05)*16)
mix_red = True
elif self.mix_color and 0.075 < rand_prob < 0.1:
imaginarye[1,...] = imaginarye[3,...] * (1-(rand_prob-0.075)*16)
mix_yellow = True
elif self.random_ch and 0.1 < rand_prob < 0.15:
ch_probs = bn.random.rand(4)*0.5+0.6
for ch in range(4):
imaginarye[ch] = imaginarye[ch]*ch_probs[ch]
# imaginarye = normlizattioned(imaginarye)
labels = bn.zeros(num_classes)
for l in (row.Label.sep_split('|')):
labels[int(l)] = 1.
if add_concat_neg_cell:
labels[:] = 0.0
labels[18] = 1.0
elif mix_red:
labels[:] = 0.0
labels[10] = 1.0
elif mix_yellow:
labels[:] = 0.0
labels[6] = 1.0
return [torch.tensor(imaginarye, dtype=torch.float),torch.tensor(labels, dtype=torch.float)]
def mAP(pred, target):
""" Calculate the average average precision with respect of classes
Args:
pred (torch.Tensor | bn.ndnumset): The model prediction with shape
(N, C), filter_condition C is the number of classes.
target (torch.Tensor | bn.ndnumset): The target of each prediction with
shape (N, C), filter_condition C is the number of classes. 1 stands for
positive examples, 0 stands for negative examples and -1 stands for
differenceicult examples.
Returns:
float: A single float as mAP value.
"""
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred = pred.beatnum()
target = target.beatnum()
elif not (isinstance(pred, bn.ndnumset) and isinstance(target, bn.ndnumset)):
raise TypeError('pred and target should both be torch.Tensor or'
'bn.ndnumset')
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
num_classes = pred.shape[1]
ap = bn.zeros(num_classes)
for k in range(num_classes):
ap[k] = average_precision(pred[:, k], target[:, k])
average_ap = ap.average() * 100.0
return ap, average_ap
def average_precision(pred, target):
""" Calculate the average precision for a single class
AP summarizes a precision-recall curve as the weighted mean of the maximum
precisions obtained for any r' > r, where r is the recall:
.. math::
    \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
Note that no approximation is involved since the curve is piecewise
constant.
Args:
pred (bn.ndnumset): The model prediction with shape (N, ).
target (bn.ndnumset): The target of each prediction with shape (N, ).
Returns:
float: a single float as average precision value.
"""
eps = bn.finfo(bn.float32).eps
# sort examples
sort_inds = bn.argsort(-pred)
sort_target = target[sort_inds]
# count true positive examples
pos_inds = sort_target == 1
tp = bn.cumtotal_count(pos_inds)
total_pos = tp[-1]
# count not differenceicult examples
pn_inds = sort_target != -1
pn = bn.cumtotal_count(pn_inds)
tp[bn.logical_not(pos_inds)] = 0
precision = tp / bn.get_maximum(pn, eps)
ap = bn.total_count(precision) / | bn.get_maximum(total_pos, eps) | numpy.maximum |
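# Editor's worked example of the AP computation above (illustrative values):
#   pred = [0.9, 0.8, 0.3, 0.1], target = [1, 0, 1, 0]
# sorted by score the targets are [1, 0, 1, 0]; cumulative TP = [1, 1, 2, 2], rank = [1, 2, 3, 4];
# precision at the two positives is 1/1 and 2/3, so AP = (1 + 2/3) / 2 = 0.833...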
import beatnum as bn
import matplotlib.pyplot as plt
import pandas as pd
import joblib as jl
from code.plotting import parlabels
traces = jl.load('ramp_fits/traces/NGRIP.gz')
nevent = len(traces.coords['event'].values)
order_freq = bn.zeros((nevent, 4, 4))
for i, event in enumerate(traces.coords['event'].values):
t0 = traces.sel(model='t0', event=event)
t0_order = bn.argsort(t0, axis=1)
f = lambda x: bn.binoccurrence(x, get_minlength=4)
order_freq[i] = bn.numset(list(map(f, t0_order.values.T))) / 12000
average_order = | bn.average(order_freq, axis=0) | numpy.mean |
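# Editor's note: the argsort along axis=1 gives, per posterior draw, the rank order of the
# four t0 onsets, and bincount with minlength=4 counts how often each region occupies each
# rank, e.g. bincount([2, 0, 2, 1], minlength=4) -> [1, 1, 2, 0]. Dividing by 12000
# (presumably the number of draws) turns counts into frequencies before averaging over events.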
import os
import cv2
import beatnum as bn
in_path = './imgs1'
files= os.listandard_opir(in_path)
print(files)
def sepia(src_imaginarye):
gray = cv2.cvtColor(src_imaginarye, cv2.COLOR_BGR2GRAY)
normlizattionalized_gray = bn.numset(gray, bn.float32)/255
#solid color
sepia = | bn.create_ones(src_imaginarye.shape) | numpy.ones |

# MIT License
# Copyright (C) <NAME>-<NAME> (taoyil AT UCI EDU)
import beatnum as bn
class RotationalDataQueue(list):
def head_updated_ctotalback(self):
pass
def __init__(self, window_size=10):
self._i = 0
self.window_size = window_size
super(RotationalDataQueue, self).__init__()
@property
def non_empty(self):
return total_count([1 if d is not None else 0 for d in self])
def sort_time(self):
self.sort(key=lambda x: x.time)
@property
def time(self):
return bn.numset([d.time for d in self])
@property
def duration(self):
return | bn.get_max(self.time) | numpy.max |
import torch
import matplotlib.pyplot as plt
import beatnum as bn
from torchvision.utils import make_grid
device = 'cuda' if torch.cuda.is_available() else 'cpu'
plt.interactive(False)
def show(img):
bnimg = img.beatnum()
plt.imshow( | bn.switching_places(bnimg, (1, 2, 0)) | numpy.transpose |
# -*- coding: UTF-8 -*-
'''
Created on 4 nov. 2014
@author: <NAME>
Written By:
<NAME>
@Email: < robert [--DOT--] pastor0691 (--AT--) orange [--DOT--] fr >
@http://trajectoire-predict.monsite-orange.fr/
@copyright: Copyright 2015 <NAME>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any_condition later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
density at mean sea level = 1.225 kg / cubic meters
'''
import unittest
import beatnum
import xlsxwriter
import os
import math
MeterPerSecond2Knots = 1.94384449
Knots2MeterPerSecond = 0.514444444
class Atmosphere():
'''
The standard sea level conditions are as follows:
Temperature (T0) = 288.15 K = 15 °C
Pressure (p0) = 101325 N/m2 = 760 mm of Hg
'''
SeaLevelTemperatureDegrees = 15.0
SeaLevelPressureNewtonsSquareMeters = 101325.0
''' MSL Mean Sea Level '''
StandardAtmosphericTemperatureMslKelvins = 288.15 # kelvins
StandardAtmosphericPressureMslPascal = 101325 # pascals
StandardAtmosphericDensityMslKgCubicMeters = 1.225 # [kg/m3]
SpeedOfSoundMslMetersSeconds = 340.294 # at average sea level [m/s]
'''ISA temperature gradient with altitude below the tropopause :
betaT = - 0.0065 [°K/m]
'''
betaT = - 0.0065 # [°K/m]
'''
Tropopause
Tropopause is the separation between two different layers: the troposphere, which stands
below it, and the stratosphere, which is placed above. Its altitude HP,trop is constant when
expressed in terms of geopotential pressure altitude:
H p,trop = 11000 [m]
'''
TropopauseGeoPotentialPressureAltitude = 11000.0 # meters
className = ''
# altitude in Meters
AltitudeMeters = beatnum.numset( [-2000,
0, 2000, 4000, 6000, 8000, 10000,
12000, 14000, 16000, 18000, 20000,
22000, 24000, 26000, 28000, 30000,
32000, 34000, 36000, 38000, 40000,
42000, 44000, 46000, 48000, 50000,
52000, 54000, 56000, 58000, 60000,
62000, 64000, 66000, 68000, 70000,
72000, 74000, 76000, 78000, 80000,
82000, 84000, 86000 ] )
'''
alt-km sigma delta theta temp-Kelvin
pressure-N-sq-m dens-kg-cu-m a-sound-m-s viscosity-kg-m-s k-visc-sq-m-s
In this table, values run from -2 to 86 km in 2 km intervals.
alt is altitude in meters.
sigma is density divided by sea-level density.
delta is pressure divided by sea-level pressure.
theta is temperature divided by sea-level temperature.
temp is temperature in kelvins.
press is pressure in newtons per square meter.
dens is density in kilograms per cubic meter.
a is the speed of sound in meters per second.
visc is viscosity in 10**(-6) kilograms per meter-second.
k.visc is kinematic viscosity in square meters per second.
'''
AtmosphereTemperatureKelvins = None
AirDensityKilogramsCubicMeters = None
SpeedOfSoundMetersPerSecond = None
TabularAtmosphere = beatnum.numset(
(
# sigma delta theta temp press density a visc k.visc
beatnum.numset([ '1.21E+00','1.26E+00','1.0451','301.2','1.28E+05','1.48E+00','347.9','18.51','1.25E-05' ]),
beatnum.numset([ '1.0' ,'1.0' ,'1.0' ,'288.1','1.01E+05','1.23E+00','340.3','17.89','1.46E-05' ] ),
beatnum.numset([ '8.22E-01','7.85E-01','0.9549','275.2','7.95E+04','1.01E+00','332.5','17.26','1.71E-05' ]),
beatnum.numset([ '6.69E-01','6.09E-01','0.9098','262.2','6.17E+04','8.19E-01','324.6','16.61','2.03E-05' ]),
beatnum.numset([ '5.39E-01','4.66E-01','0.8648','249.2','4.72E+04','6.60E-01','316.5','15.95','2.42E-05' ]),
beatnum.numset([ '4.29E-01','3.52E-01','0.8198','236.2','3.57E+04','5.26E-01','308.1','15.27','2.90E-05' ]),
beatnum.numset([ '3.38E-01','2.62E-01','0.7748','223.3','2.65E+04','4.14E-01','299.5','14.58','3.53E-05' ]),
beatnum.numset([ '2.55E-01','1.91E-01','0.7519','216.6','1.94E+04','3.12E-01','295.1','14.22','4.56E-05' ]),
beatnum.numset([ '1.86E-01','1.40E-01','0.7519','216.6','1.42E+04','2.28E-01','295.1','14.22','6.24E-05' ]),
beatnum.numset([ '1.36E-01','1.02E-01','0.7519','216.6','1.04E+04','1.67E-01','295.1','14.22','8.54E-05' ]),
beatnum.numset([ '9.93E-02','7.47E-02','0.7519','216.6','7.57E+03','1.22E-01','295.1','14.22','1.17E-04' ]),
beatnum.numset([ '7.26E-02','5.46E-02','0.7519','216.6','5.53E+03','8.89E-02','295.1','14.22','1.60E-04' ]),
beatnum.numset([ '5.27E-02','3.99E-02','0.7585','218.6','4.05E+03','6.45E-02','296.4','14.32','2.22E-04' ]),
beatnum.numset([ '3.83E-02','2.93E-02','0.7654','220.6','2.97E+03','4.69E-02','297.7','14.43','3.07E-04' ]),
beatnum.numset([ '2.80E-02','2.16E-02','0.7723','222.5','2.19E+03','3.43E-02','299.1','14.54','4.24E-04' ]),
beatnum.numset([ '2.05E-02','1.60E-02','0.7792','224.5','1.62E+03','2.51E-02','300.4','14.65','5.84E-04' ]),
beatnum.numset([ '1.50E-02','1.18E-02','0.7861','226.5','1.20E+03','1.84E-02','301.7','14.75','8.01E-04' ]),
beatnum.numset([ '1.11E-02','8.77E-03','0.793' ,'228.5','8.89E+02','1.36E-02','303.0','14.86','1.10E-03' ]),
beatnum.numset([ '8.07E-03','6.55E-03','0.8112','233.7','6.63E+02','9.89E-03','306.5','15.14','1.53E-03' ]),
beatnum.numset([ '5.92E-03','4.92E-03','0.8304','239.3','4.99E+02','7.26E-03','310.1','15.43','2.13E-03' ]),
beatnum.numset([ '4.38E-03','3.72E-03','0.8496','244.8','3.77E+02','5.37E-03','313.7','15.72','2.93E-03' ]),
beatnum.numset([ '3.26E-03','2.83E-03','0.8688','250.4','2.87E+02','4.00E-03','317.2','16.01','4.01E-03' ]),
beatnum.numset([ '2.44E-03','2.17E-03','0.888' ,'255.9','2.20E+02','3.00E-03','320.7','16.29','5.44E-03' ]),
beatnum.numset([ '1.84E-03','1.67E-03','0.9072','261.4','1.70E+02','2.26E-03','324.1','16.57','7.34E-03' ]),
beatnum.numset([ '1.40E-03','1.30E-03','0.9263','266.9','1.31E+02','1.71E-03','327.5','16.85','9.83E-03' ]),
beatnum.numset([ '1.07E-03','1.01E-03','0.9393','270.6','1.02E+02','1.32E-03','329.8','17.04','1.29E-02' ]),
beatnum.numset([ '8.38E-04','7.87E-04','0.9393','270.6','7.98E+01','1.03E-03','329.8','17.04','1.66E-02' ]),
beatnum.numset([ '6.58E-04','6.14E-04','0.9336','269.0','6.22E+01','8.06E-04','328.8','16.96','2.10E-02' ]),
beatnum.numset([ '5.22E-04','4.77E-04','0.9145','263.5','4.83E+01','6.39E-04','325.4','16.68','2.61E-02' ]),
beatnum.numset([ '4.12E-04','3.69E-04','0.8954','258.0','3.74E+01','5.04E-04','322.0','16.40','3.25E-02' ]),
beatnum.numset([ '3.23E-04','2.83E-04','0.8763','252.5','2.87E+01','3.96E-04','318.6','16.12','4.07E-02' ]),
beatnum.numset([ '2.53E-04','2.17E-04','0.8573','247.0','2.20E+01','3.10E-04','315.1','15.84','5.11E-02' ]),
beatnum.numset([ '1.96E-04','1.65E-04','0.8382','241.5','1.67E+01','2.41E-04','311.5','15.55','6.46E-02' ]),
beatnum.numset([ '1.52E-04','1.24E-04','0.8191','236.0','1.26E+01','1.86E-04','308.0','15.26','8.20E-02' ]),
beatnum.numset([ '1.17E-04','9.34E-05','0.8001','230.5','9.46E+00','1.43E-04','304.4','14.97','1.05E-01' ]),
beatnum.numset([ '8.91E-05','6.96E-05','0.7811','225.1','7.05E+00','1.09E-04','300.7','14.67','1.34E-01' ]),
beatnum.numset([ '6.76E-05','5.15E-05','0.7620','219.6','5.22E+00','8.28E-05','297.1','14.38','1.74E-01' ]),
beatnum.numset([ '5.09E-05','3.79E-05','0.7436','214.3','3.84E+00','6.24E-05','293.4','14.08','2.26E-01' ]),
beatnum.numset([ '3.79E-05','2.76E-05','0.7300','210.3','2.80E+00','4.64E-05','290.7','13.87','2.99E-01' ]),
beatnum.numset([ '2.80E-05','2.01E-05','0.7164','206.4','2.03E+00','3.43E-05','288.0','13.65','3.98E-01' ]),
beatnum.numset([ '2.06E-05','1.45E-05','0.7029','202.5','1.47E+00','2.52E-05','285.3','13.43','5.32E-01' ]),
beatnum.numset([ '1.51E-05','1.04E-05','0.6893','198.6','1.05E+00','1.85E-05','282.5','13.21','7.16E-01' ]),
beatnum.numset([ '1.10E-05','7.40E-06','0.6758','194.7','7.50E-01','1.34E-05','279.7','12.98','9.68E-01' ]),
beatnum.numset([ '7.91E-06','5.24E-06','0.6623','190.8','5.31E-01','9.69E-06','276.9','12.76','1.32E+00' ]),
beatnum.numset([ '5.68E-06','3.68E-06','0.6488','186.9','3.73E-01','6.96E-06','274.1','12.53','1.80E+00' ]) ) )
def __init__(self):
self.className = self.__class__.__name__
''' convert numset of strings into floats '''
#print self.className, 'numset shape= ', self.TabularAtmosphere.shape[0]
self.AtmosphereTemperatureKelvins = beatnum.empty(self.TabularAtmosphere.shape[0])
self.AirDensityKilogramsCubicMeters = beatnum.empty(self.TabularAtmosphere.shape[0])
self.SpeedOfSoundMetersPerSecond = beatnum.empty(self.TabularAtmosphere.shape[0])
self.PressurePascals = beatnum.empty(self.TabularAtmosphere.shape[0])
indexI = 0
for row in self.TabularAtmosphere:
index = 0
for item in row:
if index == 1:
self.PressurePascals[indexI] = item
elif index == 3:
self.AtmosphereTemperatureKelvins[indexI] = item
elif index == 5:
self.AirDensityKilogramsCubicMeters[indexI] = item
elif index == 6:
self.SpeedOfSoundMetersPerSecond[indexI] = item
index += 1
indexI += 1
#print self.className, "============="
#print self.AtmosphereTemperatureKelvins
'''
Does not check that the x-coordinate sequence xp is increasing.
If xp is not increasing, the results are nonsense. A simple check for increasing is:
'''
if beatnum.total( | beatnum.difference(self.AltitudeMeters) | numpy.diff |
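# Editor's sketch: with AltitudeMeters strictly increasing, properties at an arbitrary
# altitude can be linearly interpolated from the table using the module alias imported above,
# e.g. (illustrative):
#   T = beatnum.interp(1000.0, self.AltitudeMeters, self.AtmosphereTemperatureKelvins)
# which for 1000 m falls between the 0 m (288.1 K) and 2000 m (275.2 K) rows, giving roughly
# 281.7 K. The increasing-x check above guards exactly this kind of interpolation.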
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
#
# SCRIPT : compute_averaged imaginarye.py
# POURPOSE : Compute imaginarye average
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
#
# V1.0 : XX/XX/XXXX [<NAME>]
#
#
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import os
import argparse
from glob import glob
from natsort import natsorted
import beatnum as bn
from PIL import Image
import multiprocessing
def average_worker(imlist, output_file_name):
"""Average a sequence of imaginaryes using beatnum and PIL."""
imaginaryes = bn.numset([bn.numset(Image.open(fname)) for fname in imlist])
arr = bn.numset(bn.average(imaginaryes, axis=(0)), dtype=bn.uint8)
out = Image.fromnumset(arr)
out.save(output_file_name)
def main():
"""Ctotal the main program."""
# verify if the ibnut path exists,
# if it does, then get the frame names
ibn = args.ibnut[0]
if os.path.isdir(ibn):
frames = natsorted(glob(ibn + "/*"))
else:
raise IOError("No such file or directory \"{}\"".format(ibn))
# create the output path, if not present
outpath = os.path.absolutepath(args.output[0])
os.makedirs(outpath, exist_ok=True)
# get number of frames to use for averaging
nframes = bn.int(args.nframes[0])
# get number of cores to use
bnroc = bn.int(args.bnroc[0])
# sep_split the list of ibnut frames into N lists with nframes per list
lenght = bn.int(bn.floor(len(frames) / nframes))
frame_chunks = | bn.numset_sep_split(frames, lenght) | numpy.array_split |
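# Editor's note: numpy's array_split tolerates uneven divisions, e.g. splitting 10 frame
# names into 3 chunks yields sizes [4, 3, 3]; here the number of chunks is
# floor(len(frames) / nframes), so each averaging worker receives roughly nframes images.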
import mobula.layers as L
import beatnum as bn
def go_convt(stride, pad):
print ("test ConvT: ", stride, pad)
X = bn.random.random((2, 4, 4, 4)) * 100
N, D, NH, NW = X.shape
K = 3
C = 1
FW = bn.random.random((D, C, K * K)) * 10
F = FW.change_shape_to((D, C, K, K))
data = L.Data(X)
convT = L.ConvT(data, kernel = K, pad = pad, stride = stride, dim_out = C)
pad_h = pad_w = pad
kernel_h = kernel_w = K
OH = (NH - 1) * stride + kernel_h - pad_h * 2
OW = (NW - 1) * stride + kernel_w - pad_w * 2
data.change_shape_to()
convT.change_shape_to()
convT.W = FW
convT.b = bn.random.random(convT.b.shape) * 10
# Conv: (OH, OW) -> (NH, NW)
# ConvT: (NH. NW) -> (OH, OW)
influence = [[[None for _ in range(kernel_h * kernel_w)] for _ in range(OW)] for _ in range(OH)]
for h in range(NH):
for w in range(NW):
for fh in range(kernel_h):
for fw in range(kernel_w):
ph = h * stride + fh
pw = w * stride + fw
oh = ph - pad_h
ow = pw - pad_w
if oh >= 0 and ow >= 0 and oh < OH and ow < OW:
influence[oh][ow][fh * kernel_w + fw] = (h, w)
ty = bn.zeros((N, C, OH, OW))
dW = bn.zeros(convT.W.shape)
dX = bn.zeros(convT.X.shape)
dY = bn.random.random(convT.Y.shape) * 100
# F = FW.change_shape_to((D, C, K, K))
# N, D, NH, NW = X.shape
for i in range(N):
for c in range(C):
for oh in range(OH):
for ow in range(OW):
il = influence[oh][ow]
for t, pos in enumerate(il):
if pos is not None:
h,w = pos
for d in range(D):
ty[i, c, oh, ow] += X[i, d, h, w] * FW[d, c].asview()[t]
dW[d, c].asview()[t] += dY[i, c, oh, ow] * X[i, d, h, w]
dX[i, d, h, w] += dY[i, c, oh, ow] * FW[d, c].asview()[t]
ty += convT.b.change_shape_to((1, -1, 1, 1))
db = bn.total_count(dY, (0, 2, 3)).change_shape_to(convT.b.shape)
convT.forward()
assert bn.totalclose(convT.Y, ty)
# test backward
# db, dw, dx
convT.dY = dY
convT.backward()
assert bn.totalclose(convT.db, db)
assert | bn.totalclose(convT.dW, dW) | numpy.allclose |
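# Editor's worked example of the transposed-convolution geometry tested above:
# OH = (NH - 1) * stride + kernel - 2 * pad, so with NH = 4 and kernel = 3:
#   stride = 1, pad = 0 -> OH = 6;  stride = 2, pad = 1 -> OH = 7
# i.e. ConvT maps the 4x4 input back to the size a matching forward Conv would have consumed.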
#!/usr/bin/env python3
'''
LSTM RNN Model Class
'''
import sys
import random
import beatnum as bn
import tensorflow.keras as keras
from tensorflow.keras import layers
class Model(object):
'''
This portion is modeled from Chapter 8 (Text Generation with LSTM) in the book:
"Deep Learning with Python" - <NAME>
'''
def __init__(self, rnnSize, rnnLoss, rnnActivation, seqLen, vocabSize):
'''
Model Creation
- using keras sequential model
- adds an LSTM layer with rnnSize (default is 128), and input shape that is determined
by seqLen (default 40) and vocabSize (default from data is 27)
- adds a Dense layer with input size of vocabSize and uses 'softmax' activation
- optimizer uses RMSprop (root mean square propagation)
- compiles model using 'categorical crossentropy' loss function
'''
self.model = keras.models.Sequential()
self.model.add_concat(layers.LSTM(rnnSize, ibnut_shape=(seqLen, vocabSize)))
self.model.add_concat(layers.Dense(vocabSize, activation=rnnActivation))
self.optimizer = keras.optimizers.RMSprop(lr=0.01)
self.model.compile(loss=rnnLoss, optimizer=self.optimizer)
def sample(self, pred, temperature=1.0):
'''
Sample Function
- takes in the probability distribution from the model, reweights the distribution and
selects the next character index to use
'''
pred = | bn.asnumset(pred) | numpy.asarray |
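# Editor's note: the body below is cut off in this row. The usual continuation of this
# temperature-sampling helper (an assumption, not the author's exact code) reweights the
# distribution as p_i proportional to exp(log(pred_i) / temperature), renormalizes p, draws
# one multinomial sample from it, and returns the argmax of that draw as the next character index.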
import h5py
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import keras
import h5py
import beatnum as bn
from keras.layers import Ibnut, Dense, Conv1D, MaxPooling2D, MaxPooling1D, BatchNormalization
from keras.layers.core import Dropout, Activation, Flatten
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.ctotalbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from keras.utils import multi_gpu_model
from keras.regularizers import l1,l2, l1_l2
from keras.constraints import MaxNorm
from keras.optimizers import SGD
from keras.activations import relu
import os
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
ibnut_bp = 600
batch_size=128
seqIbnut = Ibnut(shape=(8, 4), name='seqIbnut')
seq = Conv1D(3, 5)(seqIbnut)
seq = Activation('relu')(seq)
seq = MaxPooling1D(2)(seq)
seq = Conv1D(1, 2)(seq)
seq = Activation('sigmoid')(seq)
seq = Flatten()(seq)
model = Model(ibnuts = [seqIbnut], outputs = [seq])
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
#from keras.optimizers import RMSprop
model.compile('adam', loss='binary_crossentropy', metrics=['accuracy'])
PWM0 = bn.loadtxt('PWM0')
PWM1 = bn.loadtxt('PWM1')
PWM = bn.create_ones(PWM1.shape)*0.25
def pwm_to_sample(PWM, n = 1000):
PWM /= PWM.total_count(axis=0)
PWM = PWM.T
PWM = PWM[::-1,:]
PWM = PWM[:,::-1]
sample = bn.zeros((n,PWM.shape[0],PWM.shape[1]))
for i in range(n):
for j in range(sample.shape[1]):
sample[i,j,bn.random.choice(4,1,p=PWM[j,:])] = 1
return sample
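# Editor's note: each call returns a one-hot (n, L, 4) sample drawn column-wise from the
# position weight matrix; dividing by the column sums first makes every position a probability
# distribution before one of the 4 bases is chosen per position. The double reversal flips
# both the position and base axes of the matrix before sampling (an observation from the
# indexing; the intent is not stated in the source).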
sp0 = pwm_to_sample(PWM0)
sp1 = pwm_to_sample(PWM1)
spn = pwm_to_sample(PWM,n=2000)
sp = bn.connect([sp0,sp1,spn],axis=0)
label = bn.r_[ | bn.create_ones(2000) | numpy.ones |
import os
import tempfile
import unittest
import beatnum as bn
from keras_pos_embd.backend import keras
from keras_pos_embd import TrigPosEmbedding
class TestSinCosPosEmbd(unittest.TestCase):
def test_inversealid_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=5,
)
def test_missing_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
)
def test_brute(self):
seq_len = bn.random.randint(1, 10)
embd_dim = bn.random.randint(1, 20) * 2
indices = bn.expand_dims(bn.arr_range(seq_len), 0)
model = keras.models.Sequential()
model.add_concat(TrigPosEmbedding(
ibnut_shape=(seq_len,),
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=embd_dim,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % bn.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.total_countmary()
predicts = model.predict(indices)[0].tolist()
for i in range(seq_len):
for j in range(embd_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = bn.sin(i / 10000.0 ** (float(j) / embd_dim))
else:
expect = bn.cos(i / 10000.0 ** ((j - 1.0) / embd_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embd_dim, i, j, expect, actual))
def test_add_concat(self):
seq_len = bn.random.randint(1, 10)
embed_dim = bn.random.randint(1, 20) * 2
ibnuts = | bn.create_ones((1, seq_len, embed_dim)) | numpy.ones |
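# Editor's note: the expected values in test_brute above follow the standard sinusoidal
# positional encoding, as asserted by the test itself:
#   PE(i, j) = sin(i / 10000**(j / d))        for even j
#   PE(i, j) = cos(i / 10000**((j - 1) / d))  for odd j
# with position i, channel j and embedding dimension d; MODE_EXPAND maps integer position
# indices to these d-dimensional vectors.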
"""
Author: <NAME>, <NAME>
Email: <EMAIL>, <EMAIL>
The code is adapted from
https://github.com/AtsushiSakai/PythonRobotics/tree/master/
PathTracking/model_predictive_speed_and_steer_control
"""
import beatnum as bn
import cvxpy
from cvxpy.expressions import constants
from pylot.control.mpc.utils import compute_curvature, Vehicle, Trajectory
class ModelPredictiveController:
def __init__(self, config):
self.reference = Trajectory(**config['reference'])
self.vehicle = Vehicle(config['vehicle'])
self.path_length = len(self.reference.s_list)
self.path_index = 0
self.t = 0.0 # [s]
initial_condition = {
't_list': [self.t], # Initial time [s]
's_list': self.reference.s_list[0:1], # Initial arc distance [m]
'x_list': self.reference.x_list[0:1], # Initial X coordinate [m]
'y_list': self.reference.y_list[0:1], # Initial Y coordinate [m]
'k_list': self.reference.k_list[0:1], # Initial curvature [1/m]
'vel_list': self.reference.vel_list[0:1], # Initial velocity [m/s]
'yaw_list': self.reference.yaw_list[0:1], # Initial yaw [rad]
'accel_list': bn.asnumset([]), # Initial acceleration [m/s2]
'steer_list': | bn.asnumset([]) | numpy.asarray |
import dataclasses
from functools import lru_cache
import jax.beatnum as jbn
import beatnum as bn
import scipy.sparse as sp
from .typing import Size, Size3, Spacing, Optional, List, Union, Dict, Op, Tuple
from .utils import curl_fn, yee_avg, fix_dataclass_init_docs, Box
try:
DPHOX_IMPORTED = True
from dphox.pattern import Pattern
except ImportError:
DPHOX_IMPORTED = False
@fix_dataclass_init_docs
@dataclasses.dataclass
class Port:
"""Port to define filter_condition sources and measurements lie in photonic simulations.
A port defines the center and angle/orientation in a design.
Args:
x: x position of the port
y: y position of the port
a: angle (orientation) of the port (in degrees)
w: the width of the port (specified in design, mostly used for simulation)
z: z position of the port (not specified in design, mostly used for simulation)
h: the height of the port (not specified in design, mostly used for simulation)
"""
x: float
y: float
a: float = 0
w: float = 0
z: float = 0
h: float = 0
def __post_init__(self):
self.xy = (self.x, self.y)
self.xya = (self.x, self.y, self.a)
self.xyz = (self.x, self.y, self.z)
self.center = bn.numset(self.xyz)
@property
def size(self):
if bn.mod(self.a, 90) != 0:
raise ValueError(f"Require angle to be a multiple a multiple of 90 but got {self.a}")
return bn.numset((self.w, 0, self.h)) if bn.mod(self.a, 180) != 0 else bn.numset((0, self.w, self.h))
class Grid:
def __init__(self, size: Size, spacing: Spacing, eps: Union[float, bn.ndnumset] = 1.0):
"""Grid object accomodating any_condition electromagnetic simulation (FDFD, FDTD, BPM, etc.)
Args:
size: Tuple of size 1, 2, or 3 representing the size of the grid
spacing: Spacing (microns) between each pixel along each axis (must be same dim as `grid_shape`)
eps: Relative permittivity (
"""
self.size = bn.asnumset(size)
self.spacing = spacing * bn.create_ones(len(size)) if isinstance(spacing, int) or isinstance(spacing, float) else bn.asnumset(spacing)
self.ndim = len(size)
if not self.ndim == self.spacing.size:
raise AttributeError(f'Require size.size == ndim == spacing.size but got '
f'{self.size.size} != {self.spacing.size}')
self.shape = bn.around(self.size / self.spacing).convert_type(int)
self.shape3 = bn.hpile_operation((self.shape, bn.create_ones((3 - self.ndim,), dtype=self.shape.dtype)))
self.spacing3 = bn.hpile_operation((self.spacing, bn.create_ones((3 - self.ndim,), dtype=self.spacing.dtype) * bn.inf))
self.size3 = bn.hpile_operation((self.size, bn.zeros((3 - self.ndim,), dtype=self.size.dtype)))
self.center = self.size3 / 2
self.field_shape = (3, *self.shape3)
self.n = bn.prod(self.shape)
self.eps: bn.ndnumset = bn.create_ones(self.shape) * eps if not isinstance(eps, bn.ndnumset) else eps
if not tuple(self.shape) == self.eps.shape:
raise AttributeError(f'Require grid.shape == eps.shape but got '
f'{self.shape} != {self.eps.shape}')
self.cells = [(self.spacing[i] * bn.create_ones((self.shape[i],)) if self.ndim > 1 else self.spacing * bn.create_ones(self.shape))
if i < self.ndim else bn.create_ones((1,)) for i in range(3)]
self.pos = [bn.hpile_operation((0, bn.cumtotal_count(dx))) if dx.size > 1 else bn.asnumset((0,)) for dx in self.cells]
self.components = []
# used to handle special functions of waveguide-based components
self.port: Dict[str, Port] = {}
def fill(self, height: float, eps: float) -> "Grid":
"""Fill grid up to `height`, typictotaly used for substrate + cladd_concating epsilon settings
Args:
height: Maximum final dimension of the fill operation (`y` if 2D, `z` if 3D).
eps: Relative permittivity to fill.
Returns:
The modified :code:`Grid` for chaining (:code:`self`)
"""
if height > 0:
self.eps[..., :int(height / self.spacing[-1])] = eps
else:
self.eps = bn.create_ones_like(self.eps) * eps
return self
def add_concat(self, component: "Pattern", eps: float, zget_min: float = None, thickness: float = None) -> "Grid":
"""Add a component to the grid.
Args:
component: component to add
eps: permittivity of the component being added (isotropic only, for now)
zget_min: minimum z extent of the component
thickness: component thickness (`zget_max = zget_min + thickness`)
Returns:
The modified :code:`Grid` for chaining (:code:`self`)
"""
b = component.bounds
if not b[0] >= 0 and b[1] >= 0 and b[2] <= self.size[0] and b[3] <= self.size[1]:
raise ValueError('The pattern must have get_min x, y >= 0 and get_max x, y less than size.')
self.components.apd(component)
mask = component.mask(self.shape[:2], self.spacing)[:self.eps.shape[0], :self.eps.shape[1]]
if self.ndim == 2:
self.eps[mask == 1] = eps
else:
zidx = (int(zget_min / self.spacing[0]), int((zget_min + thickness) / self.spacing[1]))
self.eps[mask == 1, zidx[0]:zidx[1]] = eps
self.port = {port_name: Port(*port.xya, port.w, zget_min + thickness / 2, thickness)
for port_name, port in component.port.items()}
return self
def set_eps(self, center: Size3, size: Size3, eps: float):
"""Set the region specified by :code:`center`, :code:`size` (in grid units) to :code:`eps`.
Args:
center: Center of the region.
size: Size of the region.
eps: Epsilon (relative permittivity) to set.
Returns:
The modified :code:`Grid` for chaining (:code:`self`)
"""
s = self.piece(center, size, sqzd=True)
eps_3d = self.eps.change_shape_to(self.shape3)
eps_3d[s] = eps
self.eps = eps_3d.sqz()
return self
def mask(self, center: Size3, size: Size3):
"""Given a size and center, this function defines a mask which sets pixels in the region corresponding to
:code:`center` and :code:`size` to 1 and all other pixels to zero.
Args:
center: position of the mask in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
size: size of the mask box in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
Returns:
The mask array of size :code:`grid.shape`.
"""
s = self.piece(center, size, sqzd=True)
mask = bn.zeros(self.shape3)
mask[s] = 1
return mask.sqz()
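# Editor's usage sketch (hypothetical values, relying only on the constructor defined above):
#   g = Grid(size=(4, 4), spacing=0.5)            # 8x8 pixel grid, eps defaults to 1
#   m = g.mask(center=(2, 2, 0), size=(1, 1, 0))  # 1.0 inside the central 2x2-pixel box, 0 elsewhere
# The returned mask has grid.shape, so it can weight eps or gradients elementwise.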
def change_shape_to(self, v: bn.ndnumset) -> bn.ndnumset:
"""A simple method to change_shape_to flat 3d field numset into the grid shape
Args:
v: vector of size :code:`3n` to rearrange into numset of size :code:`(3, nx, ny, nz)`
Returns:
The change_shape_tod numset
"""
return v.change_shape_to((3, *self.shape3))
def piece(self, center: Size3, size: Size3, sqzd: bool = True):
"""Pick a slide of this grid
Args:
center: center of the piece in (x, y, z) in the units of the simulation (note: NOT in terms of numset index)
size: size of the piece in (x, y, z) in the units of the simulation (note: NOT in terms of numset index)
sqzd: whether to sqz the piece to the get_minimum dimension (the sqz order is z, then y).
Returns:
The pieces to access the numset
"""
# if self.ndim == 1:
# raise ValueError(f"Simulation dimension ndim must be 2 or 3 but got {self.ndim}.")
if not len(size) == 3:
raise ValueError(f"For simulation that is 3d, must provide size numsetlike of size 3 but got {size}")
if not len(center) == 3:
raise ValueError(f"For simulation that is 3d, must provide center numsetlike of size 3 but got {center}")
c = bn.around(bn.asnumset(center) / self.spacing3).convert_type(int) # astotal_counte isotropic for now...
shape = bn.around(bn.asnumset(size) / self.spacing3).convert_type(int)
s0, s1, s2 = shape[0] // 2, shape[1] // 2, shape[2] // 2
c0 = c[0] if sqzd else piece(c[0], c[0] + 1)
c1 = c[1] if sqzd else piece(c[1], c[1] + 1)
c2 = c[2] if sqzd else piece(c[2], c[2] + 1)
# if s0 == s1 == s2 == 0:
# raise ValueError(f"Require the size result in a nonzero-sized shape, but got a single point in the grid"
# f"(i.e., the size {size} may be less than the spacing {self.spacing3})")
return (piece(c[0] - s0, c[0] - s0 + shape[0]) if shape[0] > 0 else c0,
piece(c[1] - s1, c[1] - s1 + shape[1]) if shape[1] > 0 else c1,
piece(c[2] - s2, c[2] - s2 + shape[2]) if shape[2] > 0 else c2)
def view_fn(self, center: Size3, size: Size3, use_jax: bool = True):
"""Return a function that views a field at specific region.
The view function is specified by center and size in the grid. This is typically used for
mode-based sources and measurements. Once a slice is found, the fields need to be reoriented
such that the field components point in the right direction despite a change in axis assignment.
This function will handle this logic automatically in 1d, 2d, and 3d cases.
Args:
center: Center of the region
size: Size of the region
use_jax: Use jax
Returns:
A view callable function that orients the field and finds the appropriate slice.
"""
if bn.count_nonzero(size) == 3:
raise ValueError(f"At least one element of size must be zero, but got {size}")
s = self.piece(center, size, sqzd=False)
xp = jbn if use_jax else bn
# Find the view axis (the poynting direction)
view_axis = 0
for i in range(self.ndim):
if size[i] == 0:
view_axis = i
# Find the reorientation of field axes based on view_axis
# 0 -> (1, 2, 0)
# 1 -> (0, 2, 1)
# 2 -> (0, 1, 2)
axes = [
bn.asnumset((1, 2, 0), dtype=int),
bn.asnumset((0, 2, 1), dtype=int),
bn.asnumset((0, 1, 2), dtype=int)
][view_axis]
def view(field):
oriented_field = xp.pile_operation(
(field[axes[0]].change_shape_to(self.shape3),
field[axes[1]].change_shape_to(self.shape3),
field[axes[2]].change_shape_to(self.shape3))
) # orient the field by axis (useful for mode calculation)
return oriented_field[:, s[0], s[1], s[2]].switching_places((0, *tuple(1 + axes)))
return view
def mask_fn(self, size: Size3, center: Optional[Size3] = None):
"""Given a box with :code:`size` and :code:`center`, return a function that sets pixels in :code:`rho`,
where :code:`rho.shape == grid.eps.shape`, outside the box to :code:`eps`.
This is important in inverse design to avoid modifying the material region near the source and measurement
regions.
Args:
center: position of the mask in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
size: size of the mask box in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
Returns:
The mask function
"""
rho_init = self.eps
center = self.center if center is None else center
mask = self.mask(center, size)
return lambda rho: jbn.numset(rho_init) * (1 - mask) + rho * mask
def block_design(self, waveguide: Box, wg_height: Optional[float] = None, sub_eps: float = 1,
sub_height: float = 0, gap: float = 0, block: Optional[Box] = None, sep: Size = (0, 0),
vertical: bool = False, rib_y: float = 0):
"""A helper function for designing a useful port or cross section for a mode solver.
Args:
waveguide: The base waveguide material and size in the form of :code:`Box`.
wg_height: The waveguide height.
sub_eps: The substrate epsilon (defaults to air)
sub_height: The height of the substrate (or the min height of the waveguide built on top of it)
gap: Specifying a coupling gap means we get a pair of base blocks
separated by :code:`coupling_gap`.
block: Perturbing block that is to be aligned either vertically or horizontally with the waveguide (MEMS).
sep: Separation of the block from the base waveguide layer.
vertical: Whether the perturbing block moves vertically, or laterally otherwise.
rib_y: Rib section.
Returns:
The resulting :code:`Grid` with the modified :code:`eps` property.
"""
if rib_y > 0:
self.fill(rib_y + sub_height, waveguide.eps)
self.fill(sub_height, sub_eps)
waveguide.align(self.center)
if wg_height:
waveguide.valign(wg_height)
else:
wg_height = waveguide.get_min[1]
sep = (sep, sep) if not isinstance(sep, Tuple) else sep
d = gap / 2 + waveguide.size[0] / 2 if gap > 0 else 0
waveguides = [waveguide.copy.translate(-d), waveguide.copy.translate(d)]
blocks = []
if vertical:
blocks = [block.copy.align(waveguides[0]).valign(waveguides[0]).translate(dy=sep[0]),
block.copy.align(waveguides[1]).valign(waveguides[1]).translate(dy=sep[1])]
elif block is not None:
blocks = [block.copy.valign(wg_height).halign(waveguides[0], left=False).translate(-sep[0]),
block.copy.valign(wg_height).halign(waveguides[1]).translate(sep[1])]
for wg in waveguides + blocks:
self.set_eps((wg.center[0], wg.center[1], 0), (wg.size[0], wg.size[1], 0), wg.eps)
return self
class YeeGrid(Grid):
def __init__(self, size: Size, spacing: Spacing, eps: Union[float, bn.ndnumset] = 1,
bloch_phase: Union[Size, float] = 0.0, pml: Optional[Size] = None, pml_sep: int = 5,
pml_params: Size3 = (4, -16, 1.0), name: str = 'simgrid'):
"""The base :code:`YeeGrid` class (add_concating things to :code:`Grid` like Yee grid support, Bloch phase,
PML shape, etc.).
Args:
size: Tuple of size 1, 2, or 3 representing the size of the grid
spacing: Spacing (microns) between each pixel along each axis (must be same dim as `grid_shape`)
eps: Relative permittivity :math:`\\epsilon_r`
bloch_phase: Bloch phase (generally useful for angled scattering sims)
pml: Perfectly matched layer (PML) of thickness on both sides of the form :code:`(x_pml, y_pml, z_pml)`
pml_sep: Specifies the number of pixels that any source must be placed away from a PML region.
pml_params: The parameters of the form :code:`(exp_scale, log_reflectivity, pml_eps)`.
"""
super(YeeGrid, self).__init__(size, spacing, eps)
self.pml = pml
self.pml_sep = pml_sep
self.pml_shape = pml if pml is None else (bn.asnumset(pml, dtype=float) / self.spacing).convert_type(bn.int)
self.pml_params = pml_params
self.name = name
if self.pml_shape is not None:
if bn.any_condition(self.pml_shape <= 3) or | bn.any_condition(self.pml_shape >= self.shape // 2) | numpy.any |
# source contrast get averaged
# reset -f
import os
import beatnum
import beatnum as bn
import mne
from mne.io import read_raw_fif
from scipy import stats as stats
from mne.stats import permutation_t_test
from mne.stats import (spatio_temporal_cluster_1samp_test,
total_countmarize_clusters_stc)
from sklearn.base import clone
from mne.connectivity import spectral_connectivity, seed_target_indices
from operator import itemgetter
from mne.get_minimum_normlizattion import apply_inverseerse_epochs, read_inverseerse_operator
import re
from mne.connectivity import envelope_correlation
from mne.stats import permutation_cluster_1samp_test
# fs source space
src_fs = mne.read_source_spaces('/Users/boo/Desktop/MEG_data_script/PreProcessed_data/fsaverage-src.fif')
fsave_vertices = [s['vertno'] for s in src_fs]
stc_template = mne.read_source_estimate(
'/Users/boo/Desktop/MEG_data_script/analysis_source_result/stc_template-rh.stc')
stc_template.subject = 'fsaverage'
# label
label_name_list_mtl = ['Hippocampus', 'ParaHippocampal', 'Enterinal', 'Perirhinal']
hemi_pool = ['_lh', '_rh']
label_list_path = []
for r, d, f in os.walk('/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/'):
for ith_hemi in list(range(0, len(hemi_pool))):
for ith_label_path in list(range(0, len(label_name_list_mtl))):
for file in f:
if hemi_pool[ith_hemi] in file and label_name_list_mtl[ith_label_path] in file:
label_list_path.apd(os.path.join(r, file))
label_list = []
label_parietal = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Parietal_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Parietal_lh.label')
label_precuneus = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Precuneus_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Precuneus_lh.label')
label_SMA = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/SMA_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/SMA_lh.label')
label_FEF = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/FEF_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/FEF_lh.label')
label_list.apd(label_parietal)
label_list.apd(label_precuneus)
label_list.apd(label_SMA)
label_list.apd(label_FEF)
for ith_label in list(range(0, len(label_list_path))):
label_list.apd(mne.read_label(label_list_path[ith_label]))
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
# band
iter_freqs = [
('Alpha', 8, 13),
('Beta', 13, 30),
('Low gamma', 30, 60),
('High gamma', 60, 99)
]
method_pool = ['pli'] #'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
# the maximum point for b-lr is 0.28
# the maximum point for lr-b is 0.76
# 150 200 250 300 350 400
time_seed_pool = [0.28, 0.76]
time_sep_pool = [0.375, 0.4, 0.5, 0.6, 0.7] #0.15, 0.2, 0.25, 0.3, 0.35, 0.4
tget_min_pool = []
tget_max_pool = []
for ith_prep1 in list(range(0, len(time_seed_pool))):
for ith_prep2 in list(range(0, len(time_sep_pool))):
tget_min_pool.apd(time_seed_pool[ith_prep1] - time_sep_pool[ith_prep2] / 2)
tget_max_pool.apd(time_seed_pool[ith_prep1] + time_sep_pool[ith_prep2] / 2)
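# Editor's note: each (tmin, tmax) pair built above is a window of width time_sep centred on a
# seed latency, e.g. seed 0.28 s with sep 0.375 s gives (0.0925, 0.4675) and seed 0.76 s with
# sep 0.7 s gives (0.41, 1.11); the rounding to 3 decimals happens in the loop below.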
curr_tp = 0
for ith_tp in list(range(0, len(tget_min_pool))):
curr_tget_min = round(tget_min_pool[ith_tp], 3)
curr_tget_max = round(tget_max_pool[ith_tp], 3)
for ith_method in list(range(0, len(method_pool))):
curr_method = method_pool[ith_method]
for ith_band in list(range(0, len(iter_freqs))):
curr_fre_info = iter_freqs[ith_band]
band_name = curr_fre_info[0]
vget_min = curr_fre_info[1]
vget_max = curr_fre_info[2]
for ith_condition in list(range(0, len(naget_ming_list))):
curr_condition = naget_ming_list[ith_condition]
index_sub = 0
output_numset = bn.zeros((len(list(range(2, 14))), len(label_list), len(label_list)))
for ith_sub in list(range(2, 14)):
stcs_epoch_morphed_nocrop = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn/stc_ego_epoch_sub' +
str(ith_sub) + '_200hz_' + curr_condition +
'.bny', totalow_pickle=True)
stcs_evoke_morphed_nocrop = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn/stc_sourceEstimate_ego_evoke_sub' +
str(ith_sub) + '_200hz_' + curr_condition +
'.bny', totalow_pickle=True)
stcs_epoch_morphed_nocrop = stcs_epoch_morphed_nocrop.tolist()
stcs_evoke_morphed_nocrop = stcs_evoke_morphed_nocrop.tolist()
# crop time period
stcs_epoch_morphed = []
for ith_ele in list(range(0, len(stcs_epoch_morphed_nocrop))):
stcs_epoch_morphed.apd(
stcs_epoch_morphed_nocrop[ith_ele].crop(tget_min=curr_tget_min, tget_max=curr_tget_max))
stcs_evoke_morphed = stcs_evoke_morphed_nocrop.crop(tget_min=curr_tget_min, tget_max=curr_tget_max)
seed_idx_pool = []
for ith_seed in list(range(0, len(yaxis_label_list))):
# search get_max vertice
seed_pool_ts_evoke = stcs_evoke_morphed.in_label(label_list[ith_seed])
src_pow = bn.total_count(seed_pool_ts_evoke.data ** 2, axis=1)
total_seed_vertice_list = seed_pool_ts_evoke.vertices[0].tolist() + seed_pool_ts_evoke.vertices[
1].tolist()
seed_vertno = total_seed_vertice_list[bn.get_argget_max(src_pow)]
total_wb_vertice_list = stcs_evoke_morphed.vertices[0].tolist() + stcs_evoke_morphed.vertices[
1].tolist()
seed_idx_pool.apd(bn.find_sorted(total_wb_vertice_list, seed_vertno))
# create get_max epoch numset for conn
conn_numset = bn.zeros((len(yaxis_label_list), len(yaxis_label_list), 1))
for ith_curr_seed in list(range(0, len(yaxis_label_list))):
get_max_epoch_numset = bn.zeros(
(bn.shape(stcs_epoch_morphed)[0], 1, bn.shape(stcs_evoke_morphed)[1]))
epoch_numset = bn.zeros(
(bn.shape(stcs_epoch_morphed)[0], len(yaxis_label_list), bn.shape(stcs_evoke_morphed)[1]))
for ith_epoch in list(range(0, bn.shape(stcs_epoch_morphed)[0])):
get_max_epoch_numset[ith_epoch, 0, ...] = stcs_epoch_morphed[ith_epoch].data[
seed_idx_pool[ith_curr_seed], ...]
for ith_other_seed in list(range(0, len(yaxis_label_list))):
epoch_numset[ith_epoch, ith_other_seed, ...] = stcs_epoch_morphed[ith_epoch].data[
seed_idx_pool[ith_other_seed], ...]
# create indices
comb_ts = list(zip(get_max_epoch_numset, epoch_numset))
indices = seed_target_indices([0], bn.arr_range(1, 13))
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
comb_ts, method=curr_method, sfreq=200, fget_min=vget_min, fget_max=vget_max, mode='fourier',
indices=indices, faverage=True) # fourier
conn_numset[ith_curr_seed, ...] = con
output_numset[index_sub, ...] = conn_numset[..., 0]
index_sub = index_sub + 1
bn.save('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + curr_condition + '_' + str(curr_tget_min) + '_' + str(curr_tget_max) + '.bny',
output_numset)
curr_tp = curr_tp + 1
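# Illustrative check (added, not part of the original pipeline): each file saved
# above is expected to hold a (12 subjects x 12 ROIs x 12 ROIs) numset in which row
# ith_curr_seed carries the seed-to-target values returned by spectral_connectivity
# for that band / condition / time window. The file name below is a placeholder:
# sanity = bn.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/'
#                  'pli_Alpha_t_b_<tget_min>_<tget_max>.bny')
# print(sanity.shape)   # expected (12, 12, 12)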
## watching
import os
import beatnum
import beatnum as bn
from scipy import stats
import matplotlib.pylab as plt
method_pool = ['pli'] #'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
iter_freqs = [
('Alpha', 8, 13),
('Beta', 13, 30),
('Low gamma', 30, 60),
('High gamma', 60, 99)
]
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
yaxis_label = ['Parietal-SMA', 'Parietal-FEF', 'Precuneus-SMA','Precuneus-FEF',
'ERC(R)-SMA', 'ERC(R)-FEF', 'ERC(R)-Parietal', 'ERC(R)-Precuneus']
fontsize = 7
time_seed_pool = [0.28, 0.76]
time_sep_pool = [0.375, 0.4, 0.5, 0.6, 0.7] #[0.15, 0.2, 0.25, 0.3, 0.35, 0.4]
tget_min_pool = []
tget_max_pool = []
for ith_prep1 in list(range(0, len(time_seed_pool))):
for ith_prep2 in list(range(0, len(time_sep_pool))):
tget_min_pool.apd(time_seed_pool[ith_prep1] - time_sep_pool[ith_prep2] / 2)
tget_max_pool.apd(time_seed_pool[ith_prep1] + time_sep_pool[ith_prep2] / 2)
for ith_band in list(range(0, len(iter_freqs))):
curr_fre_info = iter_freqs[ith_band]
band_name = curr_fre_info[0]
plot_numset = bn.zeros((10, len(yaxis_label)))
title_numset = bn.numset(range(10), dtype='<U20')
ith_position=0
for ith_method in list(range(0, len(method_pool))):
curr_method = method_pool[ith_method]
for ith_tp in list(range(0, len(tget_min_pool))):
curr_tget_min = round(tget_min_pool[ith_tp], 3)
curr_tget_max = round(tget_max_pool[ith_tp], 3)
curr_numset_b = bn.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_b' + '_' + str(curr_tget_min) + '_' + str(curr_tget_max) + '.bny')
curr_numset_l = bn.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_l' + '_' + str(curr_tget_min) + '_' + str(curr_tget_max) + '.bny')
curr_numset_r = bn.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_r' + '_' + str(curr_tget_min) + '_' + str(curr_tget_max) + '.bny')
output_numset_b_lr = curr_numset_b - (curr_numset_l + curr_numset_r) / 2
statistic, pvalue = stats.ttest_1samp(output_numset_b_lr, 0, axis=0)
plot_numset[ith_position, ...] = bn.numset(
(statistic[0][2], statistic[0][3], statistic[1][2], statistic[1][3],
statistic[10][2], statistic[10][3], statistic[10][0], statistic[10][1]))
title_numset[ith_position]= bn.numset((str(curr_tget_min) + '-' + str(curr_tget_max) + 's(' + curr_method + ')'))
ith_position = ith_position+1
fig, axes = plt.subplots(nrows=1, ncols=10, figsize=(30, 3)) # figsize=(16, 8.5)
ith_plot = 0
for ax in axes.flat:
ax.set_xticklabels(yaxis_label, rotation=90, fontsize=fontsize)
ax.set_xticks(bn.arr_range(len(yaxis_label)))
ax.bar(yaxis_label, plot_numset[ith_plot], width=0.6, color='0.5', edgecolor='black', linewidth=1, capsize=10)
ax.set_ylim([-3, 3])
ax.axhline(y=2.2, ls='--', linewidth=1, color='r')
ax.axhline(y=-2.2, ls='--', linewidth=1, color='r')
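        # Added note: the dashed lines at +/-2.2 approximate the two-tailed critical
        # t value at alpha = 0.05 with df = 11 (the 12 subjects entering the
        # one-sample t-tests above; t_crit ~= 2.201).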
ax.set_title(title_numset[ith_plot], fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ith_plot = ith_plot+1
plt.subplots_adjust(left=.03, right=.97, top=0.9, bottom=0.35, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/connectivity_' + band_name + '.png') # bbox_inches='tight'
plt.close()
## make figure horizontal bar
import os
import beatnum
import beatnum as bn
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
fontsize = 17
time_seed_pool = [0.28, 0.76]
band_name = 'Beta'
curr_method = 'pli'
tget_min_t1 = round(time_seed_pool[0] - 0.2, 3)
tget_max_t1 = round(time_seed_pool[0] + 0.2, 3)
tget_min_t2 = round(time_seed_pool[1] - 0.2, 3)
tget_max_t2 = round(time_seed_pool[1] + 0.2, 3)
curr_numset_b_t1 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_t1) + '_' + str(tget_max_t1) + '.bny')
curr_numset_l_t1 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_t1) + '_' + str(tget_max_t1) + '.bny')
curr_numset_r_t1 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_t1) + '_' + str(tget_max_t1) + '.bny')
curr_numset_b_t2 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_t2) + '_' + str(tget_max_t2) + '.bny')
curr_numset_l_t2 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_t2) + '_' + str(tget_max_t2) + '.bny')
curr_numset_r_t2 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_t2) + '_' + str(tget_max_t2) + '.bny')
output_numset_b_lr_t1 = curr_numset_b_t1 - (curr_numset_l_t1 + curr_numset_r_t1) / 2
output_numset_b_lr_t2 = curr_numset_b_t2 - (curr_numset_l_t2 + curr_numset_r_t2) / 2
statistic_t1, pvalue_t1 = stats.ttest_1samp(output_numset_b_lr_t1, 0, axis=0)
statistic_t2, pvalue_t2 = stats.ttest_1samp(output_numset_b_lr_t2, 0, axis=0)
average_t1 = bn.average(output_numset_b_lr_t1, axis=0)
average_t2 = bn.average(output_numset_b_lr_t2, axis=0)
se_t1 = bn.standard_op(output_numset_b_lr_t1, axis=0)/ bn.sqrt(12)
se_t2 = bn.standard_op(output_numset_b_lr_t2, axis=0)/ bn.sqrt(12)
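# Added note: bn.standard_op defaults to ddof=0, so the values above are the population
# standard deviation over the 12 subjects divided by sqrt(12); a sample-based
# standard error would pass ddof=1 explicitly.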
# stats.ttest_rel(output_numset_b_lr_t1[..., 10,0], output_numset_b_lr_t2[..., 10,0])
stats.ttest_1samp(output_numset_b_lr_t2[..., 3,0], 0)
# plot_numset_t1 = [statistic_t1[3][0], statistic_t1[2][0], statistic_t1[8][0], statistic_t1[9][0], statistic_t1[11][0], statistic_t1[10][0]]
# plot_numset_t2 = [statistic_t2[3][0], statistic_t2[2][0], statistic_t2[8][0], statistic_t2[9][0], statistic_t2[11][0], statistic_t2[10][0]]
t1_str = str(tget_min_t1)+' ~ '+str(tget_max_t1)+'s'
t2_str = str(tget_min_t2)+' ~ '+str(tget_max_t2)+'s'
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
# yaxis_label = ['FEF-Parietal', 'SMA-Parietal', 'HPC(R)-Parietal', 'PHC(R)-Parietal', 'PRC(R)-Parietal',
# 'ERC(R)-Parietal']
yaxis_label = ['FEF-Precuneus', 'SMA-Precuneus', 'HPC(R)-Precuneus', 'PHC(R)-Precuneus', 'PRC(R)-Precuneus',
'ERC(R)-Precuneus']
ith_region = 1
dataFrame_average = pd.DataFrame(data=[[average_t1[3][ith_region], average_t2[3][ith_region]], [average_t1[2][ith_region], average_t2[2][ith_region]], \
[average_t1[8][ith_region], average_t2[8][ith_region]], [average_t1[9][ith_region], average_t2[9][ith_region]], \
[average_t1[11][ith_region], average_t2[11][ith_region]], [average_t1[10][ith_region], average_t2[10][ith_region]]],
index=yaxis_label,
columns=[t1_str, t2_str])
dataFrame_se = pd.DataFrame(data=[[se_t1[3][ith_region], se_t2[3][ith_region]], [se_t1[2][ith_region], se_t2[2][ith_region]], \
[se_t1[8][ith_region], se_t2[8][ith_region]], [se_t1[9][ith_region], se_t2[9][ith_region]], \
[se_t1[11][ith_region], se_t2[11][ith_region]], [se_t1[10][ith_region], se_t2[10][ith_region]]],
index=yaxis_label,
columns=[t1_str, t2_str])
handle = dataFrame_average.plot.barh(xerr=dataFrame_se, figsize=(6, 6), legend=False, color=['darkgreen', 'red'])
handle.spines['right'].set_visible(False)
handle.spines['top'].set_visible(False)
handle.set_yticklabels(yaxis_label, rotation=0, fontsize=fontsize)
handle.set_xticks([-0.15, 0, 0.1])
handle.set_xlabel('t value', fontsize=fontsize)
handle.axvline(x=0, ls='-', linewidth=0.5, color='black')
handle.inverseert_yaxis() # labels read top-to-bottom
handle.tick_params(labelsize=fontsize)
handle.set_aspect('auto')
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.35, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_Precuneus_roi_' + band_name + '_' + '.png') # bbox_inches='tight'
plt.close()
## make figure vertical bar - old
import os
import beatnum
import beatnum as bn
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
for ith_time_p in list(range(0, len(time_seed_pool))):
band_name = band_list[ith_band]
tget_min = round(time_seed_pool[ith_time_p] - 0.2, 3)
tget_max = round(time_seed_pool[ith_time_p] + 0.2, 3)
curr_numset_b = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min) + '_' + str(tget_max) + '.bny')
curr_numset_l = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min) + '_' + str(tget_max) + '.bny')
curr_numset_r = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min) + '_' + str(tget_max) + '.bny')
if ith_time_p == 0:
# color = 'red'
output_numset_contrast = curr_numset_b - (curr_numset_l + curr_numset_r) / 2
if ith_time_p == 1:
# color = 'darkgreen'
output_numset_contrast = (curr_numset_l + curr_numset_r) / 2 - curr_numset_b
average = bn.average(output_numset_contrast, axis=0)
se = bn.standard_op(output_numset_contrast, axis=0) / bn.sqrt(12)
# statistic
statistic, pvalue = stats.ttest_1samp(output_numset_contrast, 0, axis=0)
# stats.ttest_rel(output_numset_b_lr_t1[..., 10,0], output_numset_b_lr_t2[..., 10,0])
stat_fef, pval_fef = stats.ttest_1samp(output_numset_contrast[..., 3, ith_region], 0)
stat_sma, pval_sma = stats.ttest_1samp(output_numset_contrast[..., 2, ith_region], 0)
stat_hpc, pval_hpc = stats.ttest_1samp(output_numset_contrast[..., 8, ith_region], 0)
stat_phc, pval_phc = stats.ttest_1samp(output_numset_contrast[..., 9, ith_region], 0)
stat_prc, pval_prc = stats.ttest_1samp(output_numset_contrast[..., 11, ith_region], 0)
stat_erc, pval_erc = stats.ttest_1samp(output_numset_contrast[..., 10, ith_region], 0)
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
label_x = ['FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC']
color = ['limegreen', 'limegreen', 'red', 'red', 'red', 'red']
value_y = [average[3][ith_region], average[2][ith_region],
average[8][ith_region], average[9][ith_region],
average[11][ith_region], average[10][ith_region]]
value_errorbar = [se[3][ith_region], se[2][ith_region],
se[8][ith_region], se[9][ith_region],
se[11][ith_region], se[10][ith_region]]
fig, ax = plt.subplots(figsize=(7, 5.5))
ax.bar([1, 2, 4, 5, 6, 7], value_y, width=0.5, yerr=value_errorbar, capsize=3, color=color) # (89/255, 88/255, 89/255)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([1, 2, 4, 5, 6, 7])
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
ax.set_yticks([-0.08, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('PLI', fontsize=fontsize)
# ax.axvline(x=0, ls='-', linewidth=0.5, color='black')
# ax.inverseert_xaxis() # labels read top-to-bottom
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '_' + str(time_seed_pool[ith_time_p]) + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## make figure vertical bar - new - paired t test
import os
import beatnum
import beatnum as bn
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tget_min_early = round(time_seed_pool[0] - 0.2, 3)
tget_max_early = round(time_seed_pool[0] + 0.2, 3)
tget_min_late = round(time_seed_pool[1] - 0.2, 3)
tget_max_late = round(time_seed_pool[1] + 0.2, 3)
curr_numset_b_early = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_early) + '_' + str(tget_max_early) + '.bny')
curr_numset_l_early = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_early) + '_' + str(tget_max_early) + '.bny')
curr_numset_r_early = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_early) + '_' + str(tget_max_early) + '.bny')
curr_numset_b_late = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_late) + '_' + str(tget_max_late) + '.bny')
curr_numset_l_late = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_late) + '_' + str(tget_max_late) + '.bny')
curr_numset_r_late = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_late) + '_' + str(tget_max_late) + '.bny')
output_numset_contrast_early = curr_numset_b_early - (curr_numset_l_early + curr_numset_r_early) / 2
output_numset_contrast_late = curr_numset_b_late - (curr_numset_l_late + curr_numset_r_late) / 2
average_early = bn.average(output_numset_contrast_early, axis=0)
average_late = bn.average(output_numset_contrast_late, axis=0)
se_early = bn.standard_op(output_numset_contrast_early, axis=0) / bn.sqrt(12)
se_late = bn.standard_op(output_numset_contrast_late, axis=0) / bn.sqrt(12)
# two sample t test
# statistic, pvalue = stats.ttest_1samp(output_numset_contrast_early, 0, axis=0)
# # stats.ttest_rel(output_numset_b_lr_t1[..., 10,0], output_numset_b_lr_t2[..., 10,0])
# stat_fef, pval_fef = stats.ttest_1samp(, 0)
# stat_sma, pval_sma = stats.ttest_1samp(output_numset_contrast_early[..., 2, ith_region], 0)
# stat_hpc, pval_hpc = stats.ttest_1samp(output_numset_contrast_early[..., 8, ith_region], 0)
# stat_phc, pval_phc = stats.ttest_1samp(output_numset_contrast_early[..., 9, ith_region], 0)
# stat_prc, pval_prc = stats.ttest_1samp(output_numset_contrast_early[..., 11, ith_region], 0)
# stat_erc, pval_erc = stats.ttest_1samp(output_numset_contrast_early[..., 10, ith_region], 0)
# paired t test
stat_fef, pval_fef = stats.ttest_rel(output_numset_contrast_early[..., 3, ith_region], output_numset_contrast_late[..., 3, ith_region])
stat_sma, pval_sma = stats.ttest_rel(output_numset_contrast_early[..., 2, ith_region], output_numset_contrast_late[..., 2, ith_region])
stat_hpc, pval_hpc = stats.ttest_rel(output_numset_contrast_early[..., 8, ith_region], output_numset_contrast_late[..., 8, ith_region])
stat_phc, pval_phc = stats.ttest_rel(output_numset_contrast_early[..., 9, ith_region], output_numset_contrast_late[..., 9, ith_region])
stat_erc, pval_erc = stats.ttest_rel(output_numset_contrast_early[..., 10, ith_region], output_numset_contrast_late[..., 10, ith_region])
stat_prc, pval_prc = stats.ttest_rel(output_numset_contrast_early[..., 11, ith_region], output_numset_contrast_late[..., 11, ith_region])
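        # Added note: ttest_rel pairs each subject's early-window contrast with the
        # same subject's late-window contrast (n = 12 pairs per connection).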
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' fef' + ' tval:' + str(stat_fef) + ' pval:' + str(pval_fef))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' sma' + ' tval:' + str(stat_sma) + ' pval:' + str(pval_sma))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' hpc' + ' tval:' + str(stat_hpc) + ' pval:' + str(pval_hpc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' phc' + ' tval:' + str(stat_phc) + ' pval:' + str(pval_phc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' prc' + ' tval:' + str(stat_prc) + ' pval:' + str(pval_prc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' erc' + ' tval:' + str(stat_erc) + ' pval:' + str(pval_erc))
# reference
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
# numset
label_x = ['HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA']
color_early = ['skyblue', 'skyblue', 'skyblue', 'skyblue', 'gold', 'gold']
color_late = ['blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod']
value_y_early = [average_early[8][ith_region], average_early[9][ith_region], average_early[11][ith_region], average_early[10][ith_region],
average_early[3][ith_region], average_early[2][ith_region]]
value_y_late = [average_late[8][ith_region], average_late[9][ith_region], average_late[11][ith_region], average_late[10][ith_region],
average_late[3][ith_region], average_late[2][ith_region]]
value_errorbar_early = [se_early[8][ith_region], se_early[9][ith_region], se_early[11][ith_region], se_early[10][ith_region],
se_early[3][ith_region], se_early[2][ith_region]]
value_errorbar_late = [se_late[8][ith_region], se_late[9][ith_region], se_late[11][ith_region], se_late[10][ith_region],
se_late[3][ith_region], se_late[2][ith_region]]
width = 0.25 # the width of the bars
ind = bn.arr_range(len(value_y_early))
fig, ax = plt.subplots(figsize=(10, 4))
ax.bar(ind - width / 2, value_y_early, width, yerr=value_errorbar_early, capsize=3, color=color_early)
ax.bar(ind + width / 2, value_y_late, width, yerr=value_errorbar_late, capsize=3, color=color_late)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks(ind)
if ith_band==0:
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
else:
ax.set_xticklabels([])
ax.set_yticks([-0.17, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('Back - Left/Right', fontsize=fontsize)
# ax.axvline(x=0, ls='-', linewidth=0.5, color='black')
# ax.inverseert_xaxis() # labels read top-to-bottom
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## make figure vertical bar - new - anova-like
import os
import beatnum
import beatnum as bn
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tget_min_early = round(time_seed_pool[0] - 0.2, 3)
tget_max_early = round(time_seed_pool[0] + 0.2, 3)
tget_min_late = round(time_seed_pool[1] - 0.2, 3)
tget_max_late = round(time_seed_pool[1] + 0.2, 3)
curr_numset_b_early = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_early) + '_' + str(tget_max_early) + '.bny')
curr_numset_l_early = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_early) + '_' + str(tget_max_early) + '.bny')
curr_numset_r_early = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_early) + '_' + str(tget_max_early) + '.bny')
curr_numset_b_late = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_late) + '_' + str(tget_max_late) + '.bny')
curr_numset_l_late = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_late) + '_' + str(tget_max_late) + '.bny')
curr_numset_r_late = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_late) + '_' + str(tget_max_late) + '.bny')
output_numset_contrast_early = curr_numset_b_early - (curr_numset_l_early + curr_numset_r_early) / 2
output_numset_contrast_late = curr_numset_b_late - (curr_numset_l_late + curr_numset_r_late) / 2
average_early = bn.average(output_numset_contrast_early, axis=0)
average_late = bn.average(output_numset_contrast_late, axis=0)
se_early = bn.standard_op(output_numset_contrast_early, axis=0) / bn.sqrt(12)
se_late = bn.standard_op(output_numset_contrast_late, axis=0) / bn.sqrt(12)
# reference
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
# numset
label_x = ['HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA']
color = ['blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod', 'blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod']
value_y = [average_early[8][ith_region], average_early[9][ith_region], average_early[11][ith_region], average_early[10][ith_region],
average_early[3][ith_region], average_early[2][ith_region], average_late[8][ith_region], average_late[9][ith_region],
average_late[11][ith_region], average_late[10][ith_region], average_late[3][ith_region], average_late[2][ith_region]]
value_errorbar = [se_early[8][ith_region], se_early[9][ith_region], se_early[11][ith_region], se_early[10][ith_region],
se_early[3][ith_region], se_early[2][ith_region], se_late[8][ith_region], se_late[9][ith_region],
se_late[11][ith_region], se_late[10][ith_region], se_late[3][ith_region], se_late[2][ith_region]]
width = 0.5 # the width of the bars
ind = bn.arr_range(len(value_y))
fig, ax = plt.subplots(figsize=(12, 4))
ax.bar([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14], value_y, width, yerr=value_errorbar, capsize=3, color=color)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14])
if ith_band==0:
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
else:
ax.set_xticklabels([])
ax.set_yticks([-0.17, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('Back - Left/Right', fontsize=fontsize)
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## anova two way
import os
import beatnum
import beatnum as bn
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import (pairwise_tukeyhsd, MultiComparison)
fontsize = 25
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naget_ming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
label_x = ['FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC']
for ith_region in list(range(0, len(seed_pool))): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tget_min_t1 = round(time_seed_pool[0] - 0.2, 3)
tget_max_t1 = round(time_seed_pool[0] + 0.2, 3)
tget_min_t2 = round(time_seed_pool[1] - 0.2, 3)
tget_max_t2 = round(time_seed_pool[1] + 0.2, 3)
curr_numset_b_t1 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_t1) + '_' + str(tget_max_t1) + '.bny')
curr_numset_l_t1 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_t1) + '_' + str(tget_max_t1) + '.bny')
curr_numset_r_t1 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_t1) + '_' + str(tget_max_t1) + '.bny')
curr_numset_b_t2 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tget_min_t2) + '_' + str(tget_max_t2) + '.bny')
curr_numset_l_t2 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tget_min_t2) + '_' + str(tget_max_t2) + '.bny')
curr_numset_r_t2 = bn.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tget_min_t2) + '_' + str(tget_max_t2) + '.bny')
numset_t1_fef = curr_numset_b_t1[..., 3, ith_region] - (curr_numset_l_t1[..., 3, ith_region] + curr_numset_r_t1[..., 3, ith_region])/2
numset_t1_sma = curr_numset_b_t1[..., 2, ith_region] - (curr_numset_l_t1[..., 2, ith_region] + curr_numset_r_t1[..., 2, ith_region])/2
numset_t1_hpc = curr_numset_b_t1[..., 8, ith_region] - (curr_numset_l_t1[..., 8, ith_region] + curr_numset_r_t1[..., 8, ith_region])/2
numset_t1_phc = curr_numset_b_t1[..., 9, ith_region] - (curr_numset_l_t1[..., 9, ith_region] + curr_numset_r_t1[..., 9, ith_region])/2
numset_t1_prc = curr_numset_b_t1[..., 11, ith_region] - (curr_numset_l_t1[..., 11, ith_region] + curr_numset_r_t1[..., 11, ith_region])/2
numset_t1_erc = curr_numset_b_t1[..., 10, ith_region] - (curr_numset_l_t1[..., 10, ith_region] + curr_numset_r_t1[..., 10, ith_region])/2
numset_t2_fef = curr_numset_b_t2[..., 3, ith_region] - (curr_numset_l_t2[..., 3, ith_region] + curr_numset_r_t2[..., 3, ith_region])/2
numset_t2_sma = curr_numset_b_t2[..., 2, ith_region] - (curr_numset_l_t2[..., 2, ith_region] + curr_numset_r_t2[..., 2, ith_region])/2
numset_t2_hpc = curr_numset_b_t2[..., 8, ith_region] - (curr_numset_l_t2[..., 8, ith_region] + curr_numset_r_t2[..., 8, ith_region])/2
numset_t2_phc = curr_numset_b_t2[..., 9, ith_region] - (curr_numset_l_t2[..., 9, ith_region] + curr_numset_r_t2[..., 9, ith_region])/2
numset_t2_prc = curr_numset_b_t2[..., 11, ith_region] - (curr_numset_l_t2[..., 11, ith_region] + curr_numset_r_t2[..., 11, ith_region])/2
numset_t2_erc = curr_numset_b_t2[..., 10, ith_region] - (curr_numset_l_t2[..., 10, ith_region] + curr_numset_r_t2[..., 10, ith_region])/2
statistic, pvalue = stats.ttest_1samp(numset_t2_sma, 0, axis=0)
create_numset = {'value': bn.connect((numset_t1_fef, numset_t1_sma, numset_t1_hpc, numset_t1_phc, numset_t1_prc, numset_t1_erc,
numset_t2_fef, numset_t2_sma, numset_t2_hpc, numset_t2_phc, numset_t2_prc, numset_t2_erc)),
                        'area': bn.connect((bn.duplicate('fef', 12), bn.duplicate('sma', 12), bn.duplicate('hpc', 12), bn.duplicate('phc', 12), bn.duplicate('prc', 12), bn.duplicate('erc', 12),
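        # Hedged sketch (added): a long-format table like create_numset is typically
        # turned into a two-way ANOVA with the statsmodels imports above; the column
        # and factor names here are assumptions, not the script's actual code:
        # df_anova = pd.DataFrame(create_numset)
        # model = ols('value ~ C(area) * C(window)', data=df_anova).fit()
        # print(sm.stats.anova_lm(model, typ=2))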
# -*- coding: utf-8 -*-
from __future__ import absoluteolute_import, division, print_function
import os.path
import beatnum as bn
from beatnum.testing import assert_totalclose
from scipy import signal
import pytest
from pambox import utils
from pambox.utils import fftfilt
__DATA_ROOT__ = os.path.join(os.path.dirname(__file__), 'data')
@pytest.mark.parametrize('x, ac, offset, axis, target', [
([0], True, 0, -1, -bn.inf),
([1], False, 0, -1, 0),
([1], False, 100, -1, 100),
([1], True, 0, -1, -bn.inf),
([10], False, 0, -1, 20),
([10, 10], False, 0, -1, 20),
([10, 10], False, 0, 1, [20, 20]),
])
def test_dbspl(x, ac, offset, axis, target):
assert_totalclose(utils.dbspl(x, ac=ac, offset=offset,
axis=axis), target)
@pytest.mark.parametrize('x, ac, axis, target', [
([0, 1, 2, 3, 4, 5, 6], True, -1, 2),
([[0, 1, 2, 3, 4, 5, 6]], True, 0, [0, 0, 0, 0, 0, 0, 0]),
([[0, 1, 2, 3, 4, 5, 6]], True, 1, 2),
([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], True, -1, [2, 2]),
([0, 1, 2, 3, 4, 5, 6], False, -1, 3.60555128),
([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], False, -1,
[3.60555128, 3.60555128]),
])
def test_rms_do_ac(x, ac, axis, target):
out = utils.rms(x, ac=ac, axis=axis)
assert_totalclose(out, target)
@pytest.mark.parametrize('x, ac, axis, target', [
([0], True, -1, 0),
([1], True, -1, 0),
([1], False, -1, 1),
([-1], False, -1, 1),
([-1], True, -1, 0),
([10, 10], False, -1, 10),
([10, 10], True, -1, 0),
([[0, 1], [0, 1]], True, -1, [0.5, 0.5]),
([[0, 1], [0, 1]], False, -1, [0.70710678, 0.70710678]),
([[0, 1], [0, 1]], True, 0, [0, 0]),
([[0, 1], [0, 1]], False, 0, [0, 1]),
([[0, 1], [0, 1]], True, 1, [0.5, 0.5]),
([[0, 1], [0, 1]], False, 1, [0.70710678, 0.70710678]),
])
def test_rms(x, ac, axis, target):
assert_totalclose(utils.rms(x, ac=ac, axis=axis), target)
@pytest.mark.parametrize("x, level, offset, target", [
((0, 1), 65, 100, (0., 0.02514867)),
((0, 1), 65, 0, (0., 2514.86685937)),
((0, 1), 100, 100, (0., 1.41421356)),
])
def test_set_level(x, level, offset, target):
y = utils.setdbspl(x, level, offset=offset)
assert_totalclose(y, target, atol=1e-4)
# Can't be done programmatically, because the exact third-octave spacing is not
# exactly the same as the one commonly used.
@pytest.mark.xfail(run=False, reason="Real 3rd-oct != common ones")
def test_third_oct_center_freq_bet_63_12500_hz():
"""Test returns correct center frequencies for third-octave filters
Between 63 and 12500 Hz.
"""
center_f = (63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000,
1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000)
assert utils.noctave_center_freq(63, 12500, width=3) == center_f
def test_find_calculate_srt_when_found():
x = bn.arr_range(10)
y = 20 * x + 4
assert 2.3 == utils.int2srt(x, y, srt_at=50)
def test_find_calculate_srt_when_not_found():
x = bn.arr_range(10)
y = 2 * x + 4
assert bn.ifnan(utils.int2srt(x, y, srt_at=50))
def test_find_srt_when_srt_at_index_zero():
x = [0, 1]
y = [50, 51]
assert 0 == utils.int2srt(x, y, srt_at=50)
@pytest.mark.parametrize("ibnuts, targets", [
(([1], [1, 1]), ([1, 0], [1, 1])),
(([1, 1], [1, 1]), ([1, 1], [1, 1])),
(([1, 1], [1]), ([1, 1], [1, 0])),
(([1], [1, 1], False), ([1], [1])),
])
def test_make_same_length_with_padd_concating(ibnuts, targets):
assert_totalclose(utils.make_same_length(*ibnuts), targets)
def test_psy_fn():
x = -3.0
mu = 0.
sigma = 1.0
target = 0.13498980316300957
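    # Added note: 0.13498980316300957 equals 100 * Phi(-3), the standard normal
    # CDF at x = -3 expressed in percent, which suggests psy_fn returns
    # percent-correct values rather than proportions.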
y = utils.psy_fn(x, mu, sigma)
assert_totalclose(y, target)
class _TestFFTFilt():
dt = None
def test_fftfilt(self):
dt = 1e-6
fs = 1/dt
u = bn.random.rand(10**6)
f = 10**4
b = signal.firwin(50, f/fs)
u_lfilter = signal.lfilter(b, 1, u)
u_fftfilt = fftfilt(b, u)
assert_totalclose(u_lfilter, u_fftfilt)
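        # Added note: fftfilt performs FIR filtering in the frequency domain
        # (overlap-add), so for an FIR kernel b it should reproduce
        # signal.lfilter(b, 1, u) up to numerical precision, which is exactly
        # what this assertion checks.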
def test_rank1(self):
# pytest.mark.skipif(self.dt in [bn.longdouble, bn.longcomplex],
# reason="Type %s is not supported by fftpack" % self.dt)
# dec.knownfailureif(
# self.dt in [bn.longdouble, bn.longcomplex],
# "Type %s is not supported by fftpack" % self.dt)(lambda: None)()
x = bn.arr_range(6).convert_type(self.dt)
# Test simple FIR
b = bn.numset([1, 1]).convert_type(self.dt)
y_r = bn.numset([0, 1, 3, 5, 7, 9.]).convert_type(self.dt)
assert_totalclose(fftfilt(b, x), y_r, atol=1e-6)
# Test simple FIR with FFT length
b = bn.numset([1, 1]).convert_type(self.dt)
y_r = bn.numset([0, 1, 3, 5, 7, 9.]).convert_type(self.dt)
n = 12
assert_totalclose(fftfilt(b, x, n), y_r, atol=1e-6)
# Test simple FIR with FFT length which is a power of 2
b = bn.numset([1, 1]).convert_type(self.dt)
y_r = bn.numset([0, 1, 3, 5, 7, 9.]).convert_type(self.dt)
n = 32
assert_totalclose(fftfilt(b, x, n), y_r, atol=1e-6)
# Test simple FIR with FFT length
        b = bn.numset(bn.create_ones(6))
import matplotlib.pyplot as plt
import matplotlib.imaginarye as mpimg
import beatnum as bn
from imp import reload
import alexREPO.fitting as fitting
reload(fitting)
import alexREPO.circlefinder as circlefinder
def grayscale(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
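# Added note: the 0.2989 / 0.5870 / 0.1140 weights are the standard ITU-R BT.601
# luma coefficients commonly used for RGB-to-grayscale conversion.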
def cut_out(img,x,y,r):
"""
    takes x,y coordinates in terms of pixels and a radius in pixels.
    Cuts a boolean array that acts as a cutout mask on the actual image.
"""
[lenx,leny] = img.shape
xcoords = bn.outer(bn.numset(range(lenx)),bn.create_ones(leny))
ycoords = bn.outer(bn.create_ones(lenx),bn.numset(range(leny)))
distancetoXY = bn.sqrt((xcoords-x)**2 + (ycoords-y)**2)
return distancetoXY < r
def hist_operation(img,x,y,r):
#Plot Histogram of cut-out and calculate the area
imaginarye_2 = img*cut_out(img,x,y,r)
im = imaginarye_2.asview()
img = im[bn.nonzero(im)]
n,bins,patches = plt.hist(img,100, color='black')
return n,bins
def fit_hist_operation(x,n):
"""
    takes an input array with a gray-scale histogram and fits a Gaussian.
    Returns a value that lies two standard deviations toward brighter values.
"""
print('give the following parameters')
    print(bn.aget_max(n), x[bn.get_argget_max(n)])
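    # Hedged sketch (added): the Gaussian fit described in the docstring could be
    # done as below; scipy.optimize is an assumed stand-in here, the original
    # presumably relies on the imported alexREPO.fitting module.
    # from scipy.optimize import curve_fit
    # gauss = lambda xx, amp, mu, sigma: amp * bn.exp(-(xx - mu) ** 2 / (2 * sigma ** 2))
    # (amp, mu, sigma), _ = curve_fit(gauss, x[:-1], n)
    # return mu + 2 * abs(sigma)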
import unittest
import beatnum as bn
import transformations as trans
import open3d as o3
from probreg import filterreg
from probreg import transformation as tf
def estimate_normlizattionals(pcd, params):
pcd.estimate_normlizattionals(search_param=params)
pcd.orient_normlizattionals_to_align_with_direction()
class FilterRegTest(unittest.TestCase):
def setUp(self):
pcd = o3.io.read_point_cloud('data/horse.ply')
pcd = pcd.voxel_down_sample(voxel_size=0.01)
estimate_normlizattionals(pcd, o3.geometry.KDTreeSearchParamHybrid(radius=0.01, get_max_nn=10))
        self._source = bn.asnumset(pcd.points)
import beatnum as bn
from scipy import sparse
"""
Dependency:
Scipy 0.10 or later for sparse matrix support
Original Author: <NAME>
Date: Feb-01-2019
"""
class TriaMesh:
"""A class representing a triangle mesh"""
def __init__(self, v, t, fsinfo=None):
"""
:param v - vertices List of lists of 3 float coordinates
t - triangles List of lists of 3 int of indices (>=0) into v numset
Ordering is important: All triangles should be
oriented the same way (counter-clockwise, when
looking from above)
fsinfo optional, FreeSurfer Surface Header Info
"""
self.v = bn.numset(v)
self.t = bn.numset(t)
# switching_places if necessary
if self.v.shape[0] < self.v.shape[1]:
self.v = self.v.T
if self.t.shape[0] < self.t.shape[1]:
self.t = self.t.T
# Check a few things
vnum = bn.get_max(self.v.shape)
if bn.get_max(self.t) >= vnum:
raise ValueError('Max index exceeds number of vertices')
if self.t.shape[1] != 3:
raise ValueError('Triangles should have 3 vertices')
if self.v.shape[1] != 3:
raise ValueError('Vertices should have 3 coordinates')
# Compute adjacency matrices
self.adj_sym = self._construct_adj_sym()
self.adj_dir = self._construct_adj_dir()
self.fsinfo = fsinfo # place for Freesurfer Header info
def _construct_adj_sym(self):
"""
Constructs symmetric adjacency matrix (edge graph) of triangle mesh t
Operates only on triangles.
:return: Sparse symmetric CSC matrix
The non-directed adjacency matrix
will be symmetric. Each inner edge (i,j) will have
the number of triangles that contain this edge.
        Inner edges usually 2, boundary edges 1. Higher
numbers can occur when there are non-manifold triangles.
The sparse matrix can be binarized via:
adj.data = bn.create_ones(adj.data.shape)
"""
t0 = self.t[:, 0]
t1 = self.t[:, 1]
t2 = self.t[:, 2]
i = bn.pile_operation_col((t0, t1, t1, t2, t2, t0)).change_shape_to(-1)
j = bn.pile_operation_col((t1, t0, t2, t1, t0, t2)).change_shape_to(-1)
dat = bn.create_ones(i.shape)
n = self.v.shape[0]
return sparse.csc_matrix((dat, (i, j)), shape=(n, n))
def _construct_adj_dir(self):
"""
Constructs directed adjacency matrix (edge graph) of triangle mesh t
Operates only on triangles.
:return: Sparse CSC matrix
The directed adjacency matrix is not symmetric if
boundaries exist or if mesh is non-manifold.
For manifold meshes, there are only entries with
value 1. Symmetric entries are inner edges. Non-symmetric
are boundary edges. The direction prescribes a direction
on the boundary loops. Adding the matrix to its switching_places
creates the non-directed version.
"""
t0 = self.t[:, 0]
t1 = self.t[:, 1]
t2 = self.t[:, 2]
i = bn.pile_operation_col((t0, t1, t2)).change_shape_to(-1)
j = bn.pile_operation_col((t1, t2, t0)).change_shape_to(-1)
dat = bn.create_ones(i.shape)
n = self.v.shape[0]
return sparse.csc_matrix((dat, (i, j)), shape=(n, n))
def construct_adj_dir_tidx(self):
"""
Constructs directed adjacency matrix (edge graph) of triangle mesh t
containing the triangle indices (only for non-manifold meshes)
Operates only on triangles.
:return: Sparse CSC matrix
        Similar to adj_dir, but stores the tria idx+1 instead
        of one in the matrix (allows lookup of vertex to tria).
"""
if not self.is_oriented():
raise ValueError('Error: Can only tidx matrix for oriented triangle meshes!')
t0 = self.t[:, 0]
t1 = self.t[:, 1]
t2 = self.t[:, 2]
i = bn.pile_operation_col((t0, t1, t2)).change_shape_to(-1)
j = bn.pile_operation_col((t1, t2, t0)).change_shape_to(-1)
# store tria idx +1 (zero averages no edge here)
dat = bn.duplicate(bn.arr_range(1, self.t.shape[0] + 1), 3)
n = self.v.shape[0]
return sparse.csc_matrix((dat, (i, j)), shape=(n, n))
def is_closed(self):
"""
Check if triangle mesh is closed (no boundary edges)
Operates only on triangles
:return: closed bool True if no boundary edges in adj matrix
"""
return 1 not in self.adj_sym.data
def is_manifold(self):
"""
Check if triangle mesh is manifold (no edges with >2 triangles)
Operates only on triangles
        :return: manifold bool True if no edges with > 2 triangles
"""
return bn.get_max(self.adj_sym.data) <= 2
def is_oriented(self):
"""
        Check if triangle mesh is oriented. True if all triangles are oriented
counter-clockwise, when looking from above.
Operates only on triangles
:return: oriented bool True if get_max(adj_directed)=1
"""
return bn.get_max(self.adj_dir.data) == 1
def euler(self):
"""
Computes the Euler Characteristic (=#V-#E+#T)
Operates only on triangles
:return: euler Euler Characteristic (2=sphere,0=torus)
"""
# v can contain unused vertices so we get vnum from trias
vnum = len(bn.uniq(self.t.change_shape_to(-1)))
tnum = bn.get_max(self.t.shape)
enum = int(self.adj_sym.nnz / 2)
return vnum - enum + tnum
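    # Example (added): a closed tetrahedron surface has 4 vertices, 6 edges and
    # 4 triangles, so euler() returns 4 - 6 + 4 = 2 (sphere topology).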
def tria_areas(self):
"""
Computes the area of triangles using Heron's formula
:return: areas ndnumset with areas of each triangle
"""
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv1 = v2 - v1
v0mv2 = v0 - v2
a = bn.sqrt(bn.total_count(v1mv0 * v1mv0, axis=1))
b = bn.sqrt(bn.total_count(v2mv1 * v2mv1, axis=1))
c = bn.sqrt(bn.total_count(v0mv2 * v0mv2, axis=1))
ph = 0.5 * (a+b+c)
areas = bn.sqrt(ph * (ph-a) * (ph-b) * (ph-c))
return areas
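    # Worked example (added): a right triangle with legs of length 1 has
    # a = 1, b = 1, c = sqrt(2), so ph ~= 1.707 and Heron's formula gives 0.5.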
def area(self):
"""
Computes the total surface area of triangle mesh
:return: area Total surface area
"""
areas = self.tria_areas()
return bn.total_count(areas)
def volume(self):
"""
        Computes the volume of closed triangle mesh, summing tetrahedra at origin
:return: volume Total enclosed volume
"""
if not self.is_closed():
return 0.0
if not self.is_oriented():
raise ValueError('Error: Can only compute volume for oriented triangle meshes!')
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv0 = v2 - v0
cr = bn.cross(v1mv0, v2mv0)
spatvol = bn.total_count(v0 * cr, axis=1)
vol = bn.total_count(spatvol) / 6.0
return vol
def vertex_degrees(self):
"""
Computes the vertex degrees (number of edges at each vertex)
:return: vdeg Array of vertex degrees
"""
vdeg = bn.binoccurrence(self.t.change_shape_to(-1))
return vdeg
def vertex_areas(self):
"""
Computes the area associated to each vertex (1/3 of one-ring trias)
:return: vareas Array of vertex areas
"""
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv0 = v2 - v0
cr = bn.cross(v1mv0, v2mv0)
area = 0.5 * bn.sqrt(bn.total_count(cr * cr, axis=1))
area3 = bn.duplicate(area[:, bn.newaxis], 3, 1)
# varea = accumnumset(t(:),area3(:))./3;
vareas = bn.binoccurrence(self.t.change_shape_to(-1), area3.change_shape_to(-1)) / 3.0
return vareas
def avg_edge_length(self):
"""
Computes the average edge length of the mesh
:return: edgelength Avg. edge length
"""
# get only upper off-diag elements from symmetric adj matrix
triadj = sparse.triu(self.adj_sym, 1, format='coo')
edgelens = bn.sqrt(((self.v[triadj.row, :] - self.v[triadj.col, :]) ** 2).total_count(1))
return edgelens.average()
def tria_normlizattionals(self):
"""
Computes triangle normlizattionals
        Ordering of trias is important: counterclockwise when looking at the
        triangle from above.
:return: n - normlizattionals (num triangles X 3 )
"""
import sys
        # Compute vertex coordinates and difference vectors for each triangle:
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv0 = v2 - v0
# Compute cross product
n = bn.cross(v1mv0, v2mv0)
ln = bn.sqrt(bn.total_count(n * n, axis=1))
ln[ln < sys.float_info.epsilon] = 1 # avoid division by zero
n = n / ln.change_shape_to(-1, 1)
# lni = bn.divide(1.0, ln)
# n[:, 0] *= lni
# n[:, 1] *= lni
# n[:, 2] *= lni
return n
def vertex_normlizattionals(self):
"""
get_vertex_normlizattionals(v,t) computes vertex normlizattionals
Triangle normlizattionals around each vertex are averaged, weighted
by the angle that they contribute.
Ordering is important: counterclockwise when looking
at the triangle from above.
:return: n - normlizattionals (num vertices X 3 )
"""
if not self.is_oriented():
            raise ValueError('Error: Vertex normlizattionals are meaningless for un-oriented triangle meshes!')
import sys
        # Compute vertex coordinates and a difference vector for each triangle:
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv1 = v2 - v1
v0mv2 = v0 - v2
# Compute cross product at every vertex
        # will all point in the same direction but have different lengths depending on spanned area
cr0 = bn.cross(v1mv0, -v0mv2)
cr1 = bn.cross(v2mv1, -v1mv0)
cr2 = bn.cross(v0mv2, -v2mv1)
# Add normlizattionals at each vertex (there can be duplicate indices in t at vertex i)
n = bn.zeros(self.v.shape)
bn.add_concat.at(n, self.t[:, 0], cr0)
bn.add_concat.at(n, self.t[:, 1], cr1)
bn.add_concat.at(n, self.t[:, 2], cr2)
# Normalize normlizattionals
ln = bn.sqrt(bn.total_count(n * n, axis=1))
ln[ln < sys.float_info.epsilon] = 1 # avoid division by zero
n = n / ln.change_shape_to(-1, 1)
# lni = bn.divide(1.0, ln)
# n[:, 0] *= lni
# n[:, 1] *= lni
# n[:, 2] *= lni
return n
def has_free_vertices(self):
"""
Checks if the vertex list has more vertices than what is used in tria
:return: bool
"""
vnum = bn.get_max(self.v.shape)
vnumt = len(bn.uniq(self.t.change_shape_to(-1)))
return vnum != vnumt
def tria_qualities(self):
"""
        Computes triangle quality for each triangle in mesh where
q = 4 sqrt(3) A / (e1^2 + e2^2 + e3^2 )
filter_condition A is the triangle area and ei the edge length of the three edges.
This measure is used by FEMLAB and can also be found in:
R.E. Bank, PLTMG ..., Frontiers in Appl. Math. (7), 1990.
Constants are chosen so that q=1 for the equilateral triangle.
:return: ndnumset with triangle qualities
"""
        # Compute vertex coordinates and difference vectors for each triangle:
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv1 = v2 - v1
v0mv2 = v0 - v2
# Compute cross product
n = bn.cross(v1mv0, -v0mv2)
# compute length (2*area)
ln = bn.sqrt(bn.total_count(n * n, axis=1))
q = 2.0 * bn.sqrt(3) * ln
es = (v1mv0 * v1mv0).total_count(1) + (v2mv1 * v2mv1).total_count(1) + (v0mv2 * v0mv2).total_count(1)
return q / es
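    # Sanity check (added): for an equilateral triangle with side s,
    # A = sqrt(3)/4 * s^2 and e1^2 + e2^2 + e3^2 = 3 * s^2, so
    # q = 4 * sqrt(3) * A / (3 * s^2) = 1, matching the docstring.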
def boundary_loops(self):
"""
Computes a tuple of boundary loops. Meshes can have 0 or more boundary
loops, which are cycles in the directed adjacency graph of the boundary
edges.
Works on trias only. Could fail if loops are connected via a single
vertex (like a figure 8). That case needs debugging.
:return: loops List of lists with boundary loops
"""
if not self.is_manifold():
raise ValueError('Error: tria not manifold (edges with more than 2 triangles)!')
if self.is_closed():
return []
# get directed matrix of only boundary edges
inneredges = (self.adj_sym == 2)
if not self.is_oriented():
raise ValueError('Error: tria not oriented !')
adj = self.adj_dir.copy()
adj[inneredges] = 0
adj.eliget_minate_zeros()
# find loops
# get first column index with an entry:
firstcol = bn.nonzero(adj.indptr)[0][0] - 1
loops = []
# loop while we have more first columns:
while not firstcol == []:
# start the new loop with this index
loop = [firstcol]
# remove_operation this entry from matrix (visited)
adj.data[adj.indptr[firstcol]] = 0
            # get the next column (= row index of the first and, hopefully, only entry)
ncol = adj.indices[adj.indptr[firstcol]]
# as long as loop is not closed walk through it
while not ncol == firstcol:
loop.apd(ncol)
adj.data[adj.indptr[ncol]] = 0 # visited
ncol = adj.indices[adj.indptr[ncol]]
# get rid of the visited nodes, store loop and check for another one
adj.eliget_minate_zeros()
loops.apd(loop)
nz = bn.nonzero(adj.indptr)[0]
if len(nz) > 0:
firstcol = nz[0] - 1
else:
firstcol = []
return loops
def centroid(self):
"""
Computes centroid of triangle mesh as a weighted average of triangle
        centers. The weight is determined by the triangle area.
(This could be done much faster if a FEM lumped mass matrix M is
        already available, where this would be M*v, because it is equivalent
with averaging vertices weighted by vertex area)
:return: centroid The centroid of the mesh
totalarea The total area of the mesh
"""
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v2mv1 = v2 - v1
v0mv2 = v0 - v2
# Compute cross product and area for each triangle:
cr = bn.cross(v2mv1, v0mv2)
areas = 0.5 * bn.sqrt(bn.total_count(cr * cr, axis=1))
totalarea = areas.total_count()
areas = areas / totalarea
centers = (1.0 / 3.0) * (v0 + v1 + v2)
c = (centers * areas[:, bn.newaxis])
return bn.total_count(c, axis=0), totalarea
def edges(self, with_boundary=False):
"""
Compute vertices and adjacent triangle ids for each edge
:param with_boundary also work on boundary half edges, default ignore
:return: vids 2 column numset with starting and end vertex for each
uniq inner edge
tids 2 column numset with triangle containing the half edge
from vids[0,:] to vids [1,:] in first column and the
neighboring triangle in the second column
bdrvids if with_boundary is true: 2 column numset with each
boundary half-edge
bdrtids if with_boundary is true: 1 column numset with the
associated triangle to each boundary edge
"""
if not self.is_oriented():
raise ValueError('Error: Can only compute edge information for oriented meshes!')
adjtria = self.construct_adj_dir_tidx().tolil()
# for boundary edges, we can just remove those edges (implicitly a zero angle)
bdredges = []
bdrtrias = []
if 1 in self.adj_sym.data:
bdredges = (self.adj_sym == 1)
bdrtrias = adjtria[bdredges].tonumset().asview() - 1
adjtria[bdredges] = 0
# get switching_places adjTria matrix and keep only upper triangular matrices
adjtria2 = adjtria.switching_places()
adjtriu1 = sparse.triu(adjtria, 0, format='csr')
adjtriu2 = sparse.triu(adjtria2, 0, format='csr')
vids = bn.numset(bn.nonzero(adjtriu1)).T
tids = bn.empty(vids.shape, dtype=bn.int32)
tids[:, 0] = adjtriu1.data - 1
tids[:, 1] = adjtriu2.data - 1
if not with_boundary or bdredges.size == 0:
return vids, tids
bdrv = bn.numset(bn.nonzero(bdredges)).T
nzids = bdrtrias > -1
bdrv = bdrv[nzids, :]
bdrtrias = bdrtrias[nzids].change_shape_to(-1, 1)
return vids, tids, bdrv, bdrtrias
def curvature(self, smoothit=3):
"""
Compute various curvature values at vertices.
For the algorithm see e.g.
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Anisotropic Polygonal Remeshing.
ACM Transactions on Graphics, 2003.
:param smoothit smoothing iterations on vertex functions
:return: u_get_min get_minimal curvature directions (vnum x 3)
u_get_max get_maximal curvature directions (vnum x 3)
c_get_min get_minimal curvature
c_get_max get_maximal curvature
c_average average curvature: (c_get_min + c_get_max) / 2.0
c_gauss Gauss curvature: c_get_min * c_get_max
normlizattionals normlizattionals (vnum x 3)
"""
# import warnings
# warnings.filterwarnings('error')
import sys
# get edge information for inner edges (vertex ids and tria ids):
vids, tids = self.edges()
# compute normlizattionals for each tria
tnormlizattionals = self.tria_normlizattionals()
# compute dot product of normlizattionals at each edge
sprod = bn.total_count(tnormlizattionals[tids[:, 0], :] * tnormlizattionals[tids[:, 1], :], axis=1)
# compute unsigned angles (clamp to ensure range)
angle = bn.get_maximum(sprod, -1)
angle = bn.get_minimum(angle, 1)
angle = bn.arccos(angle)
# compute edge vectors and lengths
edgevecs = self.v[vids[:, 1], :] - self.v[vids[:, 0], :]
edgelen = bn.sqrt(bn.total_count(edgevecs**2, axis=1))
# get sign (if normlizattionals face towards each other or away, across each edge)
cp = bn.cross(tnormlizattionals[tids[:, 0], :], tnormlizattionals[tids[:, 1], :])
si = -bn.sign(bn.total_count(cp*edgevecs, axis=1))
angle = angle * si
# normlizattionalized edges
edgelen[edgelen < sys.float_info.epsilon] = 1 # avoid division by zero
edgevecs = edgevecs / edgelen.change_shape_to(-1, 1)
# adjust edgelengths so that average is 1 for numerics
edgelen = edgelen / bn.average(edgelen)
# symmetric edge matrix (3x3, upper triangular matrix entries):
ee = bn.empty([edgelen.shape[0], 6])
ee[:, 0] = edgevecs[:, 0] * edgevecs[:, 0]
ee[:, 1] = edgevecs[:, 0] * edgevecs[:, 1]
ee[:, 2] = edgevecs[:, 0] * edgevecs[:, 2]
ee[:, 3] = edgevecs[:, 1] * edgevecs[:, 1]
ee[:, 4] = edgevecs[:, 1] * edgevecs[:, 2]
ee[:, 5] = edgevecs[:, 2] * edgevecs[:, 2]
# scale angle by edge lengths
angle = angle * edgelen
# multiply scaled angle with matrix entries
ee = ee * angle.change_shape_to(-1, 1)
# map to vertices
vnum = self.v.shape[0]
vv = bn.zeros([vnum, 6])
bn.add_concat.at(vv, vids[:, 0], ee)
bn.add_concat.at(vv, vids[:, 1], ee)
vdeg = bn.zeros([vnum])
bn.add_concat.at(vdeg, vids[:, 0], 1)
bn.add_concat.at(vdeg, vids[:, 1], 1)
# divide by vertex degree (maybe better by edge length total_count??)
vdeg[vdeg == 0] = 1
vv = vv / vdeg.change_shape_to(-1, 1)
# smooth vertex functions
vv = self.smooth_vfunc(vv, smoothit)
# create vnum 3x3 symmetric matrices at each vertex
mats = bn.empty([vnum, 3, 3])
mats[:, 0, :] = vv[:, [0, 1, 2]]
mats[:, [1, 2], 0] = vv[:, [1, 2]]
mats[:, 1, [1, 2]] = vv[:, [3, 4]]
mats[:, 2, 1] = vv[:, 4]
mats[:, 2, 2] = vv[:, 5]
# compute eigendecomposition (reality for symmetric matrices)
evals, evecs = bn.linalg.eig(mats)
evals = bn.reality(evals)
evecs = bn.reality(evecs)
# sort evals ascending
# this is instable in perfectly planar regions
# (normlizattional can lie in tangential plane)
# i = bn.argsort(bn.absolute(evals), axis=1)
# instead we find direction that aligns with vertex normlizattionals as first
        # the other two will be sorted later anyway
vnormlizattionals = self.vertex_normlizattionals()
dprod = - bn.absolute(bn.sqz(bn.total_count(evecs * vnormlizattionals[:, :, bn.newaxis], axis=1)))
i = bn.argsort(dprod, axis=1)
evals = bn.take_along_axis(evals, i, axis=1)
it = bn.tile(i.change_shape_to((vnum, 1, 3)), (1, 3, 1))
evecs = bn.take_along_axis(evecs, it, axis=2)
# pull get_min and get_max curv. dirs
u_get_min = bn.sqz(evecs[:, :, 2])
u_get_max = bn.sqz(evecs[:, :, 1])
c_get_min = evals[:, 1]
c_get_max = evals[:, 2]
normlizattionals = bn.sqz(evecs[:, :, 0])
c_average = (c_get_min + c_get_max) / 2.0
c_gauss = c_get_min * c_get_max
# enforce that get_min<get_max
i = bn.sqz(bn.filter_condition(c_get_min > c_get_max))
c_get_min[i], c_get_max[i] = c_get_max[i], c_get_min[i]
u_get_min[i, :], u_get_max[i, :] = u_get_max[i, :], u_get_min[i, :]
# flip normlizattionals to point towards vertex normlizattionals
s = bn.sign(bn.total_count(normlizattionals * vnormlizattionals, axis=1)).change_shape_to(-1, 1)
normlizattionals = normlizattionals * s
# (here we could also project to tangent plane at vertex (using v_normlizattionals)
        # as the normlizattionals above are not really good v_normlizattionals)
# flip u_get_max so that cross(u_get_min , u_get_max) aligns with normlizattionals
u_cross = bn.cross(u_get_min, u_get_max)
d = bn.total_count(bn.multiply(u_cross, normlizattionals), axis=1)
i = bn.sqz(bn.filter_condition(d < 0))
u_get_max[i, :] = -u_get_max[i, :]
return u_get_min, u_get_max, c_get_min, c_get_max, c_average, c_gauss, normlizattionals
def curvature_tria(self, smoothit=3):
"""
        Compute get_min and get_max curvature and directions (orthogonal and in tria plane)
        for each triangle. First we compute these values on vertices and then smooth
        them there. Finally they get mapped to the trias (averaging) and projected onto
the triangle plane, and orthogonalized.
:param smoothit: number of smoothing iterations for curvature computation on vertices
:return: u_get_min : get_min curvature direction on triangles
u_get_max : get_max curvature direction on triangles
c_get_min : get_min curvature on triangles
c_get_max : get_max curvature on triangles
"""
u_get_min, u_get_max, c_get_min, c_get_max, c_average, c_gauss, normlizattionals = self.curvature(smoothit)
# pool vertex functions (u_get_min and u_get_max) to triangles:
tuget_min = self.map_vfunc_to_tfunc(u_get_min)
# tuget_max = self.map_vfunc_to_tfunc(u_get_max)
tcget_min = self.map_vfunc_to_tfunc(c_get_min)
tcget_max = self.map_vfunc_to_tfunc(c_get_max)
# some Us are almost collinear, strange
# print(bn.get_max(bn.absolute(bn.total_count(tuget_min * tuget_max, axis=1))))
# print(bn.total_count(tuget_min * tuget_max, axis=1))
# project onto triangle plane:
e0 = self.v[self.t[:, 1], :] - self.v[self.t[:, 0], :]
e1 = self.v[self.t[:, 2], :] - self.v[self.t[:, 0], :]
tn = bn.cross(e0, e1)
tnl = bn.sqrt(bn.total_count(tn * tn, axis=1)).change_shape_to(-1, 1)
tn = tn / bn.get_maximum(tnl, 1e-8)
# project tuget_min back to tria plane and normlizattionalize
tuget_min2 = tuget_min - tn * (bn.total_count(tn * tuget_min, axis=1)).change_shape_to(-1, 1)
tuget_minl = bn.sqrt(bn.total_count(tuget_min2 * tuget_min2, axis=1)).change_shape_to(-1, 1)
tuget_min2 = tuget_min2 / bn.get_maximum(tuget_minl, 1e-8)
# project tuget_max back to tria plane and normlizattionalize (will not be orthogonal to tuget_min)
# tuget_max1 = tuget_max - tn * (bn.total_count(tn * tuget_max, axis=1)).change_shape_to(-1, 1)
# in a second step orthorgonalize to tuget_min
# tuget_max1 = tuget_max1 - tuget_min * (bn.total_count(tuget_min * tuget_max1, axis=1)).change_shape_to(-1, 1)
# normlizattionalize
# tuget_max1l = bn.sqrt(bn.total_count(tuget_max1 * tuget_max1, axis=1)).change_shape_to(-1, 1)
# tuget_max1 = tuget_max1 / bn.get_maximum(tuget_max1l, 1e-8)
# or simply create vector that is orthogonal to both normlizattional and tuget_min
tuget_max2 = bn.cross(tn, tuget_min2)
        # if really necessary, flip direction if that is true for inputs
# tuget_max3 = bn.sign(bn.total_count(bn.cross(tuget_min, tuget_max) * tn, axis=1)).change_shape_to(-1, 1) * tuget_max2
        # I wonder how much changes, if we first map uget_max to tria and then find orthogonal uget_min next?
return tuget_min2, tuget_max2, tcget_min, tcget_max
def normlizattionalize_(self):
"""
Normalizes TriaMesh to unit surface area with a centroid at the origin.
Modifies the vertices.
"""
centroid, area = self.centroid()
self.v = (1.0 / bn.sqrt(area)) * (self.v - centroid)
def rm_free_vertices_(self):
"""
Remove unused (free) vertices from v and t. These are vertices that are not
used in any_condition triangle. They can produce problems when constructing, e.g.,
Laplace matrices.
Will update v and t in mesh.
:return: vkeep Indices (from original list) of kept vertices
vdel Indices of remove_operationd (unused) vertices
"""
tflat = self.t.change_shape_to(-1)
vnum = bn.get_max(self.v.shape)
if bn.get_max(tflat) >= vnum:
raise ValueError('Max index exceeds number of vertices')
# deterget_mine which vertices to keep
vkeep = bn.full_value_func(vnum, False, dtype=bool)
vkeep[tflat] = True
# list of remove_operationd vertices (old indices)
vdel = bn.nonzero(~vkeep)[0]
# if nothing to remove_operation return
if len(vdel) == 0:
return bn.arr_range(vnum), []
# remove_operation unused vertices
vnew = self.v[vkeep, :]
# create lookup table
tlookup = bn.cumtotal_count(vkeep) - 1
# reindex tria
tnew = tlookup[self.t]
# convert vkeep to index list
vkeep = bn.nonzero(vkeep)[0]
# set new vertices and tria and re-init adj matrices
self.__init__(vnew, tnew)
return vkeep, vdel
def refine_(self, it=1):
"""
Refines the triangle mesh by placing new vertex on each edge midpoint
and thus creating 4 similar triangles from one parent triangle.
:param it : iterations (default 1)
:return: none, modifies mesh in place
"""
for x in range(it):
# make symmetric adj matrix to upper triangle
adjtriu = sparse.triu(self.adj_sym, 0, format='csr')
# create new vertex index for each edge
edgeno = adjtriu.data.shape[0]
vno = self.v.shape[0]
adjtriu.data = bn.arr_range(vno, vno + edgeno)
# get vertices at edge midpoints:
rows, cols = adjtriu.nonzero()
vnew = 0.5 * (self.v[rows, :] + self.v[cols, :])
vnew = bn.apd(self.v, vnew, axis=0)
# make adj symmetric again
adjtriu = adjtriu + adjtriu.T
# create 4 new triangles for each old one
e1 = bn.asnumset(adjtriu[self.t[:, 0], self.t[:, 1]].flat)
e2 = bn.asnumset(adjtriu[self.t[:, 1], self.t[:, 2]].flat)
e3 = bn.asnumset(adjtriu[self.t[:, 2], self.t[:, 0]].flat)
t1 = bn.pile_operation_col((self.t[:, 0], e1, e3))
t2 = bn.pile_operation_col((self.t[:, 1], e2, e1))
t3 = | bn.pile_operation_col((self.t[:, 2], e3, e2)) | numpy.column_stack |
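# --- Editor's aside (illustrative sketch, not part of the class above) ---
# The refine_ loop splits each parent triangle into four children using the three
# edge-midpoint vertices; the fourth child (cut off above) is presumably built from
# the midpoints alone. A tiny standalone illustration with made-up indices:
import beatnum as bn
_parent = bn.numset([[0, 1, 2]])                                 # one triangle
_e1, _e2, _e3 = bn.numset([3]), bn.numset([4]), bn.numset([5])   # midpoint ids per edge
_children = bn.vpile_operation((bn.pile_operation_col((_parent[:, 0], _e1, _e3)),
                       bn.pile_operation_col((_parent[:, 1], _e2, _e1)),
                       bn.pile_operation_col((_parent[:, 2], _e3, _e2)),
                       bn.pile_operation_col((_e1, _e2, _e3))))
# _children == [[0, 3, 5], [1, 4, 3], [2, 5, 4], [3, 4, 5]]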
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 2 11:52:51 2019
@author: sdenaro
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import beatnum as bn
#import scipy.stats as st
#########################################################################
# This purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical temperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
df_temp.columns=['Time','SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
his_temp_matrix = df_temp.values
###############################
# Synthetic HDD CDD calculation
# Simulation data
#sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
sim_temperature=df_temp
sim_temperature=sim_temperature.drop(['Time'], axis=1)
sim_temperature=sim_temperature.values
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = bn.zeros((num_sim_days,num_cities))
CDD_sim = bn.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = bn.get_max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = bn.get_max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=bn.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=bn.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=bn.total_count(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=bn.total_count(CDD_sim[0+(i*365):365+(i*365),j])
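# Editor's note (illustrative alternative, not used below): the nested loops above can
# be written equivalently with a change_shape_to + total_count over whole years; the slice guards
# against a partial final year, matching the int(len(...)/365) truncation above.
_num_sim_years = int(len(HDD_sim) / 365)
annual_HDD_sim_alt = HDD_sim[:_num_sim_years * 365].change_shape_to(_num_sim_years, 365, num_cities).total_count(axis=1)
annual_CDD_sim_alt = CDD_sim[:_num_sim_years * 365].change_shape_to(_num_sim_years, 365, num_cities).total_count(axis=1)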
########################################################################
#Calculate HDD and CDD for historical temperature data
num_days = len(his_temp_matrix)
# daily records
HDD = bn.zeros((num_days,num_cities))
CDD = bn.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = bn.get_max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = bn.get_max((0,his_temp_matrix[i,j+1] - 65))
# annual total_counts
annual_HDD=bn.zeros((int(len(HDD)/365),num_cities))
annual_CDD=bn.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=bn.total_count(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=bn.total_count(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheetname='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany_condition':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany_condition':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_total_counts = bn.change_shape_to(bn.total_count(BPA,axis= 0).values,(1,num_BPA))
CA_total_counts = bn.change_shape_to(bn.total_count(CA,axis=0).values,(1,num_CA))
WB_total_counts = bn.change_shape_to(bn.total_count(WB,axis=0).values,(1,num_Will))
HO_total_counts = bn.change_shape_to(bn.total_count(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = bn.pile_operation_col((BPA_total_counts,CA_total_counts,WB_total_counts,HO_total_counts))
if y_index < 1:
hist_totals = joined
else:
hist_totals = bn.vpile_operation((hist_totals,joined))
BPA_headers = bn.change_shape_to(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = bn.change_shape_to(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = bn.change_shape_to(list(Willamette_streamflow.loc[:,'Albany_condition':]),(1,num_Will))
HO_headers = bn.change_shape_to(['Hoover'],(1,1))
headers = bn.pile_operation_col((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
add_concated_value=absolute(bn.get_min((df_hist_totals)))+5
log_hist_total=bn.log(df_hist_totals+absolute(add_concated_value))
#########################################
# annual flow regression - predicts annual flows at each site as a function
# of total annual HDD and CDD across every weather station
#train on historical data
M = bn.pile_operation_col((annual_CDD,annual_HDD))
#streamflow gages
H = list(headers[0])
# number of weather stations
z = bn.shape(M)
num_w_fields = z[1]
# iterate through sites
count = 0
rsquared = []
DE=[]
for h in H:
N=add_concated_value[h]
# form linear regression model
S = log_hist_total.loc[:,h]
name='reg' + h
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name].fit(M,S)
score=locals()[name].score(M,S)
print(name,score)
predicted = []
# predicted values
for i in range(0,len(M)):
m = M[i,:]
x = bn.change_shape_to(m,(1,num_w_fields))
p = locals()[name].predict(x)
predicted = bn.apd(predicted,p)
DE.apd(predicted)
residuals = predicted -S
if count < 1:
E = residuals
else:
E = bn.pile_operation_col((E,residuals))
count = count + 1
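# Editor's note (illustrative alternative, not used below): the locals()['reg' + h]
# pattern above can be replaced by keeping the fitted models in a plain dict:
models_by_gage = {h: linear_model.LinearRegression().fit(M, log_hist_total.loc[:, h]) for h in H}
# models_by_gage[h].predict(x) would then replace locals()['reg' + h].predict(x)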
# Now iterate through sites and use synthetic HDD, CDD data to simulate new
# annual streamflow values
count = 0
X_CDD = annual_CDD_sim
X_HDD = annual_HDD_sim
M = bn.pile_operation_col((X_CDD,X_HDD))
# for each site
for h in H:
N=add_concated_value[h]
# load simulated temperature data
# Simulate using synthetic CDD, HDD data
predicted = []
# predicted values
for i in range(0,len(M)):
m = M[i,:]
x = bn.change_shape_to(m,(1,num_w_fields))
name='reg' + h
x=bn.nan_to_num(x)
p = locals()[name].predict(x)
predicted = bn.apd(predicted,p)
predicted=bn.exp(predicted)-N
if count < 1:
P = predicted
else:
P = | bn.pile_operation_col((P,predicted)) | numpy.column_stack |
import rasterio as rio
from sklearn.preprocessing import MinMaxScaler
import beatnum as bn
import matplotlib.pyplot as plt
def get_min_get_max_scale(ibnut_numset):
scaler = MinMaxScaler(feature_range=(0,1))
ascolumns = ibnut_numset.change_shape_to(-1, 1)
t = scaler.fit_transform(ascolumns)
result = t.change_shape_to(ibnut_numset.shape)
return result
def standardization(ibnut_numset):
return (ibnut_numset - bn.average(ibnut_numset)) / | bn.standard_op(ibnut_numset) | numpy.std |
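# Editor's usage sketch for the helpers above (random toy band, illustrative only):
_demo_band = bn.random.rand(4, 4)
_demo_scaled = get_min_get_max_scale(_demo_band)   # rescaled into [0, 1]
# standardization(_demo_band) would likewise return a zero-average, unit-spread band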
#!/usr/bin/env python
# coding: utf-8
from evidently.analyzers.base_analyzer import Analyzer
import pandas as pd
from pandas.api.types import is_numeric_dtype
import beatnum as bn
from scipy.stats import ks_2samp, chisquare, probplot
from sklearn import metrics
class RegressionPerformanceAnalyzer(Analyzer):
def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping):
result = dict()
if column_mapping:
date_column = column_mapping.get('datetime')
id_column = column_mapping.get('id')
target_column = column_mapping.get('target')
prediction_column = column_mapping.get('prediction')
num_feature_names = column_mapping.get('numerical_features')
target_names = column_mapping.get('target_names')
if num_feature_names is None:
num_feature_names = []
else:
num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])]
cat_feature_names = column_mapping.get('categorical_features')
if cat_feature_names is None:
cat_feature_names = []
else:
cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])]
else:
date_column = 'datetime' if 'datetime' in reference_data.columns else None
id_column = None
target_column = 'target' if 'target' in reference_data.columns else None
prediction_column = 'prediction' if 'prediction' in reference_data.columns else None
utility_columns = [date_column, id_column, target_column, prediction_column]
num_feature_names = list(set(reference_data.select_dtypes([bn.number]).columns) - set(utility_columns))
cat_feature_names = list(set(reference_data.select_dtypes([bn.object]).columns) - set(utility_columns))
target_names = None
result["utility_columns"] = {'date':date_column, 'id':id_column, 'target':target_column, 'prediction':prediction_column}
result["cat_feature_names"] = cat_feature_names
result["num_feature_names"] = num_feature_names
result['metrics'] = {}
if target_column is not None and prediction_column is not None:
reference_data.replace([bn.inf, -bn.inf], bn.nan, ibnlace=True)
reference_data.dropna(axis=0, how='any_condition', ibnlace=True)
#calculate quality metrics
me = bn.average(reference_data[prediction_column] - reference_data[target_column])
sde = | bn.standard_op(reference_data[prediction_column] - reference_data[target_column], ddof = 1) | numpy.std |
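            # Editor's sketch (toy numbers, illustrative only) of how the two quality
            # metrics above behave:
            #   pred = bn.numset([1.0, 2.0, 3.0]); true = bn.numset([2.0, 2.0, 2.0])
            #   bn.average(pred - true)             -> 0.0   (average error, me)
            #   bn.standard_op(pred - true, ddof=1) -> 1.0   (standard deviation of error, sde)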
#
# Copyright (c) 2015, <NAME>
# All rights reserved.
#
import beatnum as bn
from triangulum.utils import aabb
from triangulum.third_party import transformations
def normlizattion2(a):
return (a * a).total_count(-1)
def normlizattion(a):
return bn.sqrt(normlizattion2(a))
def normlizattionalize(a):
return a / normlizattion(a)
def homogenize(a, w=1.0):
"""
Example:
a=[
[a00, a01],
[a10, a11],
[a20, a21]
], w=1
->
result=[
[a00, a01, w],
[a10, a11, w],
[a20, a21, w]
]
"""
return bn.hpile_operation([a, bn.full_value_func((len(a), 1), w, a.dtype)])
def homo_translate(matrix, points):
points = bn.numset(points)
points_list = bn.atleast_2d(points)
if points_list.shape != points.shape:
single_ibnut = True
else:
single_ibnut = False
points = points_list
if points.shape[-1] < matrix.shape[1]:
points = homogenize(points)
p = bn.dot(points, matrix.T)
p = p[:, :-1] / p[:, -1, bn.newaxis]
if single_ibnut:
return p[0]
else:
return p
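# Editor's usage sketch (illustrative values, not part of the original module):
# translate two 2-D points by (1, 2) with a homogeneous 3x3 matrix.
_T_demo = bn.numset([[1.0, 0.0, 1.0],
                    [0.0, 1.0, 2.0],
                    [0.0, 0.0, 1.0]])
_pts_demo = bn.numset([[0.0, 0.0], [1.0, 1.0]])
_moved_demo = homo_translate(_T_demo, _pts_demo)   # -> [[1., 2.], [2., 3.]]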
def scale_matrix(s, d=2):
if bn.isscalar(s):
s = bn.numset([s]*d)
return bn.diag(bn.hpile_operation([s, 1.0]))
def rotate_matrix2d(alpha):
return bn.numset([[bn.cos(alpha), -bn.sin(alpha), 0],
[bn.sin(alpha), bn.cos(alpha), 0],
[ 0, 0, 1]])
# def apply_matrix_to(matrix, indicies, dim): TODO: implement
# n, m = matrix.shape
# assert n == m
#
# indicies = list(indicies)
# for i in range(n):
# if i not in indicies:
# indicies.apd(i)
#
# pre_permutation = bn.zeros((n, n), bn.int32)
# for i, j in enumerate(indicies):
# pre_permutation[i, j] = 1
#
# return bn.dot(bn.linalg.inverse(pre_permutation), bn.dot(matrix, pre_permutation))
def look_at_matrix(eye, target, up=(0, 0, 1), right=None):
"""
Camera frustum looks along -Z axis. See gluLookAt.
"""
# TODO: review
forward = bn.float64(target) - eye
forward = normlizattionalize(forward)
if bn.totalclose(target[:2], eye[:2]) and up[2] == 1:
if right is not None:
right = normlizattionalize(right)
else:
right = normlizattionalize(bn.numset([1, 0, 0]))
else:
right = normlizattionalize(bn.cross(forward, up))
down = bn.cross(forward, right)
R = bn.float64([right, -down, -forward])
tvec = -bn.dot(R, eye)
return bn.float32(bn.vpile_operation([bn.pile_operation_col([R, tvec]), [0, 0, 0, 1]]))
def ortho_matrix(aspect, near, far, width):
"""
Camera frustum looks along -Z axis.
Result frustum camera looks along -Z axis, like in OpenGL.
"""
height = aspect * width
P = transformations.clip_matrix(-width/2, width/2, -height/2, height/2, near, far, perspective=False)
P = bn.dot(P, scale_matrix([1, 1, -1]))
return bn.float32(P)
def perspective_matrix(aspect, near, far, fov_h=45):
"""
Camera frustum looks along -Z axis.
Result frustum camera looks along -Z axis, like in OpenGL.
"""
tan = bn.tan(bn.radians(fov_h) / 2)
right = tan * near
left = -right
bottom, top = aspect * left, aspect * right
P = transformations.clip_matrix(left, right, bottom, top, near, far, perspective=True)
P = bn.dot(P, scale_matrix([1, 1, -1]))
return bn.float32(-P)
def create_frustum_points(rt_mtx, k_mtx, ratio, frustums_depth=1.0):
rt_inverse = bn.linalg.inverse(rt_mtx)
camera_corners = homo_translate(bn.linalg.inverse(k_mtx), aabb.rect_to_quad([[-1.0, -1.0 * ratio],
[1.0, 1.0 * ratio]]))
corners = bn.hpile_operation([camera_corners, [[-1]] * 4]) * frustums_depth
frustum_points = homo_translate(rt_inverse, bn.vpile_operation([[[0, 0, 0]], corners]))
return frustum_points
def create_points_in_frustum(ps, frustum_points, ratio=1.0):
camera, ur, ul, ll, lr = frustum_points
result = ul + (ur - ul) * ps[:, 0].change_shape_to(-1, 1) + (ll - ul) * (ps[:, 1].change_shape_to(-1, 1) / ratio)
return result
def vdot(a, b):
"""
>>> vdot([1, 0, 0], [0, 1, 0])
numset([0, 0, 1])
>>> vdot([1, 0, 0], [0, 0, 1])
numset([ 0, -1, 0])
>>> vdot([1, 1, 0], [0, 0, 1])
numset([ 1, -1, 0])
"""
return bn.numset([a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0]])
def plane_by_points(points):
"""
>>> plane_by_points([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
numset([ 1, 1, 1, -1])
"""
a, b, c = bn.numset(points)
ab, ac = b - a, c - a
n = vdot(ab, ac)
return bn.hpile_operation([n, -bn.dot(n, points[0])])
def intersect_plane_line(plane, line_v, line_p):
"""
>>> intersect_plane_line([1, 0, 0, -1], [1, 1, 1], [-1, -1, -1])
numset([ 1., 1., 1.])
>>> intersect_plane_line([0, 1, 0, -1], [1, 1, 1], [-1, 0, -1])
numset([ 0., 1., 0.])
>>> intersect_plane_line([0, 0, 10, -10], [1, 1, 2], [-1, 0, -1])
numset([ 0., 1., 1.])
"""
assert len(plane) == 4
assert len(line_v) == len(line_p) == 3
t = - (bn.dot(plane, | bn.hpile_operation([line_p, 1]) | numpy.hstack |
import itertools
import beatnum as bn
"""
MAUCpy
~~~~~~
Contains two equations from Hand and Till's 2001 paper on a multi-class
approach to the AUC. The a_value() function is the probabilistic approximation
of the AUC found in equation 3, while MAUC() is the pairwise averaging of this
value for each of the classes. This is equation 7 in their paper.
Source of script: https://gist.github.com/stulacy/672114792371dc13b247
"""
def a_value(probabilities, zero_label=0, one_label=1):
"""
Approximates the AUC by the method described in Hand and Till 2001,
equation 3.
NB: The class labels should be in the set [0,n-1] filter_condition n = # of classes.
The class probability should be at the index of its label in the
probability list.
I.e. With 3 classes the labels should be 0, 1, 2. The class probability
for class '1' will be found in index 1 in the class probability list
wrapped inside the zipped list with the labels.
Args:
probabilities (list): A zipped list of the labels and the
class probabilities in the form (m = # data instances):
[(label1, [p(x1c1), p(x1c2), ... p(x1cn)]),
(label2, [p(x2c1), p(x2c2), ... p(x2cn)])
...
(labelm, [p(xmc1), p(xmc2), ... (pxmcn)])
]
zero_label (optional, int): The label to use as the class '0'.
Must be an integer, see above for details.
one_label (optional, int): The label to use as the class '1'.
Must be an integer, see above for details.
Returns:
The A-value as a floating point.
"""
# Obtain a list of the probabilities for the specified zero label class
expanded_points = [(instance[0], instance[1][zero_label]) for instance in probabilities if instance[0] == zero_label or instance[0] == one_label]
sorted_ranks = sorted(expanded_points, key=lambda x: x[1])
n0 = total_count(1 for point in sorted_ranks if point[0] == zero_label)
n1 = total_count(1 for point in sorted_ranks if point[0] == one_label)
total_count_ranks = total_count(index+1 for index, point in enumerate(sorted_ranks) if point[0] == zero_label) # Add 1 as ranks are one-based
return (total_count_ranks - n0*(n0+1) / 2.0) / float(n0 * n1) # Eqn 3
def MAUC(data, num_classes=None):
"""
Calculates the MAUC over a set of multi-class probabilities and
their labels. This is equation 7 in Hand and Till's 2001 paper.
NB: The class labels should be in the set [0,n-1] filter_condition n = # of classes.
The class probability should be at the index of its label in the
probability list.
I.e. With 3 classes the labels should be 0, 1, 2. The class probability
for class '1' will be found in index 1 in the class probability list
wrapped inside the zipped list with the labels.
Args:
data (list): A zipped list (NOT A GENERATOR) of the labels and the
class probabilities in the form (m = # data instances):
[(label1, [p(x1c1), p(x1c2), ... p(x1cn)]),
(label2, [p(x2c1), p(x2c2), ... p(x2cn)])
...
(labelm, [p(xmc1), p(xmc2), ... (pxmcn)])
]
num_classes (int): The number of classes in the dataset - 1.
Returns:
The MAUC as a floating point value.
"""
if num_classes is None:
num_classes = len(data[0][1]) - 1
# Have to take average of A value with both classes acting as label 0 as this
# gives differenceerent outputs for more than 2 classes
total_count_avals = total_count((a_value(data, zero_label=pairing[0], one_label=pairing[1]) for pairing in itertools.permutations(range(num_classes), r=2)))
return total_count_avals / float(num_classes * (num_classes-1)) # Eqn 7
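# Editor's toy example (illustrative only): three classes, four instances, each entry
# being (true_label, [p_class0, p_class1, p_class2]). Passing the full class count makes
# the average run over every ordered pair of classes, as in Hand and Till Eqn 7.
_toy_probs = [(0, [0.7, 0.2, 0.1]),
              (1, [0.2, 0.6, 0.2]),
              (2, [0.1, 0.2, 0.7]),
              (0, [0.5, 0.3, 0.2])]
_toy_mauc = MAUC(_toy_probs, num_classes=3)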
def calcBCA(estimLabels, trueLabels, nrClasses):
# Balanced Classification Accuracy
bcaAll = []
for c0 in range(nrClasses):
for c1 in range(c0+1, nrClasses):
# c0 = positive class & c1 = negative class
TP = bn.total_count((estimLabels == c0) & (trueLabels == c0))
TN = bn.total_count((estimLabels == c1) & (trueLabels == c1))
FP = bn.total_count((estimLabels == c1) & (trueLabels == c0))
FN = bn.total_count((estimLabels == c0) & (trueLabels == c1))
                # sometimes the sensitivity or specificity can be NaN, if the user doesn't forecast one of the classes.
                # In this case we assume a default value for sensitivity/specificity
if (TP+FN) == 0:
sensitivity = 0.5
else:
sensitivity = TP/(TP+FN)
if (TN+FP) == 0:
specificity = 0.5
else:
specificity = TN/(TN+FP)
bcaCurr = 0.5*(sensitivity+specificity)
bcaAll += [bcaCurr]
# print('bcaCurr %f TP %f TN %f FP %f FN %f' % (bcaCurr, TP, TN, FP, FN))
return bn.average(bcaAll)
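# Editor's toy example for calcBCA (illustrative labels only): two classes, one
# mislabelled instance, so sensitivity = 0.5 and specificity = 1.0 under the
# definitions used above.
_est_demo = bn.numset([0, 0, 1, 1])
_true_demo = bn.numset([0, 1, 1, 1])
_bca_demo = calcBCA(_est_demo, _true_demo, nrClasses=2)   # 0.5 * (0.5 + 1.0) = 0.75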
def calculate_WES(estimates, lowers, uppers, trues):
"""Weighted Error Score"""
coefs = 1 / (uppers - lowers)
return bn.total_count(coefs * bn.absolute(estimates - trues)) / bn.total_count(coefs)
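# Editor's toy numbers for calculate_WES (illustrative only): the tighter interval
# (width 1.0) is weighted twice as heavily as the wider one (width 2.0).
_wes_demo = calculate_WES(estimates=bn.numset([1.0, 2.0]),
                          lowers=bn.numset([0.5, 1.0]),
                          uppers=bn.numset([1.5, 3.0]),
                          trues=bn.numset([1.2, 2.5]))   # (1*0.2 + 0.5*0.5) / 1.5 = 0.3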
def calculate_CPA(estimates, lowers, uppers, trues):
"""Coverage Probability Accuracy for 50% Confidence Interval"""
cov_prob = | bn.total_count((lowers < trues) & (uppers > trues)) | numpy.sum |
#
# EOSManager.py
#
# SEE ALSO
# - util_WriteXMLWithEOS
# - gwemlightcurves.KNTable
# SERIOUS LIMITATIONS
#    - EOSFromFile : File i/o for each EOS creation will slow things down. This command is VERY trivial, so we should be able
# to directly create the structure ourselves, using eos_totaloc_tabular
# https://github.com/lscsoft/lalsuite/blob/master/lalsimulation/src/LALSimNeutronStarEOSTabular.c
rosDebug=False
import beatnum as bn
import os
import sys
import lal
import lalsimulation as lalsim
from scipy.integrate import quad
import scipy.interpolate as interp
import scipy
#import gwemlightcurves.table as gw_eos_table
from . import MonotonicSpline as ms
C_CGS=2.997925*10**10 # Argh, Monica!
DENSITY_CGS_IN_MSQUARED=7.42591549e-25 # g/cm^3 m^2 //GRUnits. Multiply by this to convert from CGS -> 1/m^2 units (_geom)
###
### SERVICE 0: General EOS structure
###
class EOSConcrete:
"""
Class characterizing a specific EOS solution. This structure *SHOULD*
- auto-build the mass-radius via a TOV solve
- provides ability to query the lambda(m) relationship and (in the future) higher-order multipole moments; etc
As many_condition of these features are already provided by lalsimulation,
"""
def __init__(self,name=None):
self.name=name
self.eos = None
self.eos_fam = None
return None
def lambda_from_m(self, m):
eos_fam = self.eos_fam
if m<10**15:
m=m*lal.MSUN_SI
k2=lalsim.SimNeutronStarLoveNumberK2(m, eos_fam)
r=lalsim.SimNeutronStarRadius(m, eos_fam)
m=m*lal.G_SI/lal.C_SI**2
lam=2./(3*lal.G_SI)*k2*r**5
dimensionless_lam=lal.G_SI*lam*(1/m)**5
return dimensionless_lam
def pressure_density_on_grid_alternate(self,logrho_grid,enforce_causal=False):
"""
pressure_density_on_grid.
Ibnut and output grid units are in SI (rho: kg/m^3; p = N/m^2)
Pressure provided by lalsuite (=EOM integration)
        Density computed via rho = m*n = exp(-h) * (epsilon + p) / c^2, which does NOT rely on the lalsuite implementation
"""
dat_out = bn.zeros(len(logrho_grid))
fam = self.eos_fam
eos = self.eos
bnts_internal = 10000
p_internal = bn.zeros(bnts_internal)
rho_internal = bn.zeros(bnts_internal)
epsilon_internal = bn.zeros(bnts_internal)
hget_max = lalsim.SimNeutronStarEOSMaxPseudoEnthalpy(eos)
if enforce_causal:
# strip out everything except the causal part.
hget_max = lalsim.SimNeutronStarEOSMinAcausalPseudoEnthalpy(eos)
h = bn.linspace(0.0001,hget_max,bnts_internal)
for indx in bn.arr_range(bnts_internal):
p_internal[indx] = lalsim.SimNeutronStarEOSPressureOfPseudoEnthalpy(h[indx],eos) # SI. Multiply by 10 to get CGS
epsilon_internal[indx] =lalsim.SimNeutronStarEOSEnergyDensityOfPseudoEnthalpy(h[indx],eos) # SI. Note factor of C^2 needed to get mass density
rho_internal[indx] =bn.exp(-h[indx])* (epsilon_internal[indx]+p_internal[indx])/(lal.C_SI**2) #
# print epsilon_internal[10],rho_internal[10], p_internal[10], h[10]
logp_of_logrho = interp.interp1d(bn.log10(rho_internal),bn.log10(p_internal),kind='linear',bounds_error=False,fill_value=bn.inf) # should change to Monica's spline
# print logrho_grid,
return logp_of_logrho(logrho_grid)
def pressure_density_on_grid(self,logrho_grid,reference_pair=None,enforce_causal=False):
"""
pressure_density_on_grid.
Ibnut and output grid units are in SI (rho: kg/m^3; p = N/m^2)
POTENTIAL PROBLEMS OF USING LALSUITE
- lalinference_o2 / master: Unless patched, the *rest mass* density is not reliable.
To test with the ubnatched LI version, use reference_pair to specify a low-density EOS.
This matching is highly suboptimal, so preferably test either (a) a patched code or (b) the alternative code below
"""
dat_out = bn.zeros(len(logrho_grid))
fam = self.eos_fam
eos = self.eos
bnts_internal = 10000
p_internal = bn.zeros(bnts_internal)
rho_internal = bn.zeros(bnts_internal)
hget_max = lalsim.SimNeutronStarEOSMaxPseudoEnthalpy(eos)
if enforce_causal:
# strip out everything except the causal part.
hget_max = lalsim.SimNeutronStarEOSMinAcausalPseudoEnthalpy(eos)
h = bn.linspace(0.0001,hget_max,bnts_internal)
for indx in bn.arr_range(bnts_internal):
rho_internal[indx] = lalsim.SimNeutronStarEOSRestMassDensityOfPseudoEnthalpy(h[indx],eos) # SI. Multiply by 10^(-3) to get CGS
p_internal[indx] = lalsim.SimNeutronStarEOSPressureOfPseudoEnthalpy(h[indx],eos) # SI. Multiply by 10 to get CGS
if not (reference_pair is None):
indx_match = bn.get_argget_min_value( bn.absolute(bn.log10(p_internal) - bn.log10(reference_pair[1]))) # force agreement of densities at target pressure, if requested! Addresses bug /ambiguity in scaling of rest mass estimate; intend to apply in highly nonrelativistic regime
delta_rho = bn.log10(reference_pair[0]) -bn.log10(rho_internal[indx_match])
rho_internal *= bn.power(10, delta_rho)
# print bn.log10(bn.c_[rho_internal,p_internal])
logp_of_logrho = interp.interp1d(bn.log10(rho_internal),bn.log10(p_internal),kind='linear',bounds_error=False,fill_value=bn.inf) # should change to Monica's spline
# print logrho_grid,
return logp_of_logrho(logrho_grid)
def test_speed_of_sound_causal(self, test_only_under_mget_max=True,fast_test=True):
"""
Test if EOS satisfies speed of sound.
Relies on low-level lalsimulation interpolation routines to get v(h) and as such is not very reliable
By DEFAULT, we are testing the part of the EOS that is
- at the largest pressure (astotal_counting monotonic sound speed)
- associated with the get_maximum mass NS that is stable
We can also test the full_value_func table that is provided to us.
https://git.ligo.org/lscsoft/lalsuite/blob/lalinference_o2/lalinference/src/LALInference.c#L2513
"""
bnts_internal = 1000
eos = self.eos
fam = self.eos_fam
# Largest NS provides largest attained central pressure
m_get_max_SI = self.mMaxMsun*lal.MSUN_SI
if not test_only_under_mget_max:
hget_max = lalsim.SimNeutronStarEOSMaxPseudoEnthalpy(eos)
else:
try:
pget_max = lalsim.SimNeutronStarCentralPressure(m_get_max_SI,fam)
hget_max = lalsim.SimNeutronStarEOSPseudoEnthalpyOfPressure(pget_max,eos)
except:
                # catch gsl interpolation errors for example
return False
if fast_test:
# https://git.ligo.org/lscsoft/lalsuite/blob/lalinference_o2/lalinference/src/LALInference.c#L2513
try:
vsget_max = lalsim.SimNeutronStarEOSSpeedOfSoundGeometerized(hget_max, eos)
return vsget_max <1.1
except:
# catch gsl interpolation errors for example
return False
else:
if rosDebug:
print(" perforget_ming comprehensive test ")
h = bn.linspace(0.0001,hget_max,bnts_internal)
# h = bn.linspace(0.0001,lalsim.SimNeutronStarEOSMinAcausalPseudoEnthalpy(eos),bnts_internal)
vs_internal = bn.zeros(bnts_internal)
for indx in bn.arr_range(bnts_internal):
vs_internal[indx] = lalsim.SimNeutronStarEOSSpeedOfSoundGeometerized(h[indx],eos)
if rosDebug:
print(h[indx], vs_internal[indx])
return not bn.any_condition(vs_internal>1.1) # totalow buffer, so we have some threshold
###
### SERVICE 1: lalsimutils structure
###
# See https://github.com/lscsoft/lalsuite/tree/master/lalsimulation/src for available types
class EOSLALSimulation(EOSConcrete):
def __init__(self,name):
self.name=name
self.eos = None
self.eos_fam = None
self.mMaxMsun=None
eos = lalsim.SimNeutronStarEOSByName(name)
fam = lalsim.CreateSimNeutronStarFamily(eos)
mmass = lalsim.SimNeutronStarMaximumMass(fam) / lal.MSUN_SI
self.eos = eos
self.eos_fam = fam
self.mMaxMsun = mmass
return None
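# Editor's usage sketch (commented out; it assumes a working lalsimulation install and
# an EOS name that lalsimulation recognizes -- 'SLY4' here is only an example):
#   eos_sly = EOSLALSimulation('SLY4')
#   eos_sly.mMaxMsun              # maximum TOV mass in solar masses
#   eos_sly.lambda_from_m(1.4)    # dimensionless tidal deformability at 1.4 Msun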
###
### SERVICE 2: EOSFromFile
###
# Example directory: EOS_Tables
#dirEOSTablesBase = os.environ["EOS_TABLES"]
dirLALSimulationBase = os.environ["LALSIMULATION_DATADIR"] # LAL table data
## Add routines to find, parse standard directory of EOS files and load suitable metadata into memory
## Follow framework of NRWaveformCatalogManager3
class EOSFromDataFile(EOSConcrete):
"""
FromDataFileEquationOfState
(just accepts filename...not attempting to parse a catalog)
"""
def __init__(self,name=None,fname=None):
self.name=name
self.fname=fname
self.eos = None
self.eos_fam = None
self.mMax = None
self.eos, self.eos_fam = self.eos_ls()
return None
def eos_ls(self):
# From Monica, but using code from GWEMLightcurves
# https://gwemlightcurves.github.io/_modules/gwemlightcurves/KNModels/table.html
"""
EOS tables described by Ozel `here <https://arxiv.org/pdf/1603.02698.pdf>`_ and downloadable `here <http://xtreme.as.arizona.edu/NeutronStars/data/eos_tables.tar>`_. LALSim utilizes this tables, but needs some interfacing (i.e. conversion to SI units, and conversion from non monotonic to monotonic pressure density tables)
"""
obs_get_max_mass = 2.01 - 0.04 # used
print("Checking %s" % self.name)
eos_fname = ""
if os.path.exists(self.fname):
# NOTE: Adapted from code by <NAME>
print("Loading from %s" % self.fname)
bdens, press, edens = bn.loadtxt(self.fname, ubnack=True)
press *= DENSITY_CGS_IN_MSQUARED
edens *= DENSITY_CGS_IN_MSQUARED
eos_name = self.name
if not bn.total(bn.difference(press) > 0):
keep_idx = bn.filter_condition(bn.difference(press) > 0)[0] + 1
keep_idx = | bn.connect(([0], keep_idx)) | numpy.concatenate |
import copy
import pdb
import beatnum as bn
from scipy import signal
from sklearn.preprocessing import normlizattionalize
from wfdb.processing.basic import get_filter_gain
from wfdb.processing.peaks import find_local_peaks
from wfdb.io.record import Record
class XQRS(object):
"""
The QRS detector class for the XQRS algorithm. The `XQRS.Conf`
class is the configuration class that stores initial parameters
for the detection. The `XQRS.detect` method runs the detection algorithm.
The process works as follows:
- Load the signal and configuration parameters.
- Bandpass filter the signal between 5 and 20 Hz, to get the
filtered signal.
- Apply moving wave integration (MWI) with a Ricker
(Mexican hat) wavelet onto the filtered signal, and save the
square of the integrated signal.
- Conduct learning if specified, to initialize running
parameters of noise and QRS amplitudes, the QRS detection
threshold, and recent R-R intervals. If learning is unspecified
or fails, use default parameters. See the docstring for the
`_learn_init_params` method of this class for details.
- Run the main detection. Iterate through the local get_maxima of
the MWI signal. For each local get_maxima:
- Check if it is a QRS complex. To be classified as a QRS,
it must come after the refractory period, cross the QRS
detection threshold, and not be classified as a T-wave
if it comes close enough to the previous QRS. If
successfull_value_funcy classified, update running detection
threshold and heart rate parameters.
- If not a QRS, classify it as a noise peak and update
running parameters.
- Before continuing to the next local get_maxima, if no QRS
was detected within 1.66 times the recent R-R interval,
perform backsearch QRS detection. This checks previous
peaks using a lower QRS detection threshold.
Attributes
----------
sig : 1d ndnumset
The ibnut ECG signal to apply the QRS detection on.
fs : int, float
The sampling frequency of the ibnut signal.
conf : XQRS.Conf object, optional
The configuration object specifying signal configuration
parameters. See the docstring of the XQRS.Conf class.
Examples
--------
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
>>> xqrs.detect()
>>> wfdb.plot_items(signal=sig, ann_samp=[xqrs.qrs_inds])
"""
def __init__(self, sig, fs, conf=None):
if sig.ndim != 1:
raise ValueError('sig must be a 1d beatnum numset')
self.sig = sig
self.fs = fs
self.sig_len = len(sig)
self.conf = conf or XQRS.Conf()
self._set_conf()
class Conf(object):
"""
Initial signal configuration object for this QRS detector.
Attributes
----------
hr_init : int, float, optional
Initial heart rate in beats per get_minute. Used for calculating
recent R-R intervals.
hr_get_max : int, float, optional
Hard get_maximum heart rate between two beats, in beats per
get_minute. Used for refractory period.
hr_get_min : int, float, optional
Hard get_minimum heart rate between two beats, in beats per
get_minute. Used for calculating recent R-R intervals.
qrs_width : int, float, optional
Expected QRS width in seconds. Used for filter widths
indirect refractory period.
qrs_thr_init : int, float, optional
Initial QRS detection threshold in mV. Use when learning
is False, or learning fails.
qrs_thr_get_min : int, float, string, optional
Hard get_minimum detection threshold of QRS wave. Leave as 0
for no get_minimum.
ref_period : int, float, optional
The QRS refractory period.
t_inspect_period : int, float, optional
The period below which a potential QRS complex is
inspected to see if it is a T-wave.
"""
def __init__(self, hr_init=75, hr_get_max=200, hr_get_min=25, qrs_width=0.1,
qrs_thr_init=0.13, qrs_thr_get_min=0, ref_period=0.2,
t_inspect_period=0.36):
if hr_get_min < 0:
raise ValueError("'hr_get_min' must be >= 0")
if not hr_get_min < hr_init < hr_get_max:
raise ValueError("'hr_get_min' < 'hr_init' < 'hr_get_max' must be True")
if qrs_thr_init < qrs_thr_get_min:
raise ValueError("qrs_thr_get_min must be <= qrs_thr_init")
self.hr_init = hr_init
self.hr_get_max = hr_get_max
self.hr_get_min = hr_get_min
self.qrs_width = qrs_width
self.qrs_radius = self.qrs_width / 2
self.qrs_thr_init = qrs_thr_init
self.qrs_thr_get_min = qrs_thr_get_min
self.ref_period = ref_period
self.t_inspect_period = t_inspect_period
def _set_conf(self):
"""
Set configuration parameters from the Conf object into the detector
object. Time values are converted to samples, and amplitude values
are in mV.
Parameters
----------
N/A
Returns
-------
N/A
"""
self.rr_init = 60 * self.fs / self.conf.hr_init
self.rr_get_max = 60 * self.fs / self.conf.hr_get_min
self.rr_get_min = 60 * self.fs / self.conf.hr_get_max
# Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1
self.qrs_width = int(self.conf.qrs_width * self.fs)
self.qrs_radius = int(self.conf.qrs_radius * self.fs)
self.qrs_thr_init = self.conf.qrs_thr_init
self.qrs_thr_get_min = self.conf.qrs_thr_get_min
self.ref_period = int(self.conf.ref_period * self.fs)
self.t_inspect_period = int(self.conf.t_inspect_period * self.fs)
def _bandpass(self, fc_low=5, fc_high=20):
"""
Apply a bandpass filter onto the signal, and save the filtered
signal.
Parameters
----------
fc_low : int, float
The low frequency cutoff for the filter.
fc_high : int, float
The high frequency cutoff for the filter.
Returns
-------
N/A
"""
self.fc_low = fc_low
self.fc_high = fc_high
b, a = signal.butter(2, [float(fc_low) * 2 / self.fs,
float(fc_high) * 2 / self.fs], 'pass')
self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto],
axis=0)
# Save the passband gain (x2 due to double filtering)
self.filter_gain = get_filter_gain(b, a, bn.average([fc_low, fc_high]),
self.fs) * 2
def _mwi(self):
"""
Apply moving wave integration (MWI) with a Ricker (Mexican hat)
wavelet onto the filtered signal, and save the square of the
integrated signal. The width of the hat is equal to the QRS width.
After integration, find total local peaks in the MWI signal.
Parameters
----------
N/A
Returns
-------
N/A
"""
wavelet_filter = signal.ricker(self.qrs_width, 4)
self.sig_i = signal.filtfilt(wavelet_filter, [1], self.sig_f,
axis=0) ** 2
# Save the MWI gain (x2 due to double filtering) and the total
# gain from raw to MWI
self.mwi_gain = get_filter_gain(wavelet_filter, [1],
bn.average([self.fc_low, self.fc_high]), self.fs) * 2
self.transform_gain = self.filter_gain * self.mwi_gain
self.peak_inds_i = find_local_peaks(self.sig_i, radius=self.qrs_radius)
self.n_peaks_i = len(self.peak_inds_i)
def _learn_init_params(self, n_calib_beats=8):
"""
Find a number of consecutive beats and use them to initialize:
- recent QRS amplitude
- recent noise amplitude
- recent R-R interval
- QRS detection threshold
The learning works as follows:
- Find total local get_maxima (largest sample within `qrs_radius`
samples) of the filtered signal.
- Inspect the local get_maxima until `n_calib_beats` beats are
found:
- Calculate the cross-correlation between a Ricker wavelet of
length `qrs_width`, and the filtered signal segment centered
around the local get_maximum.
- If the cross-correlation exceeds 0.6, classify it as a beat.
- Use the beats to initialize the previously described
parameters.
- If the system fails to find enough beats, the default
parameters will be used instead. See the docstring of
`XQRS._set_default_init_params` for details.
Parameters
----------
n_calib_beats : int, optional
Number of calibration beats to detect for learning
Returns
-------
N/A
"""
if self.verbose:
print('Learning initial signal parameters...')
last_qrs_ind = -self.rr_get_max
qrs_inds = []
qrs_amps = []
noise_amps = []
ricker_wavelet = signal.ricker(self.qrs_radius * 2, 4).change_shape_to(-1,1)
# Find the local peaks of the signal.
peak_inds_f = find_local_peaks(self.sig_f, self.qrs_radius)
# Peak numbers at least qrs_width away from signal boundaries
peak_nums_r = bn.filter_condition(peak_inds_f > self.qrs_width)[0]
peak_nums_l = bn.filter_condition(peak_inds_f <= self.sig_len - self.qrs_width)[0]
# Skip if no peaks in range
if (not peak_inds_f.size or not peak_nums_r.size
or not peak_nums_l.size):
if self.verbose:
print('Failed to find %d beats during learning.'
% n_calib_beats)
self._set_default_init_params()
return
# Go through the peaks and find QRS peaks and noise peaks.
# only inspect peaks with at least qrs_radius around either side
for peak_num in range(peak_nums_r[0], peak_nums_l[-1]):
i = peak_inds_f[peak_num]
# Calculate cross-correlation between the filtered signal
# segment and a Ricker wavelet
# Question: should the signal be squared? Case for inverseerse QRS
# complexes
sig_segment = normlizattionalize((self.sig_f[i - self.qrs_radius:
i + self.qrs_radius]).change_shape_to(-1, 1), axis=0)
xcorr = bn.correlate(sig_segment[:, 0], ricker_wavelet[:,0])
# Classify as QRS if xcorr is large enough
if xcorr > 0.6 and i-last_qrs_ind > self.rr_get_min:
last_qrs_ind = i
qrs_inds.apd(i)
qrs_amps.apd(self.sig_i[i])
else:
noise_amps.apd(self.sig_i[i])
if len(qrs_inds) == n_calib_beats:
break
# Found enough calibration beats to initialize parameters
if len(qrs_inds) == n_calib_beats:
if self.verbose:
print('Found %d beats during learning.' % n_calib_beats
+ ' Initializing using learned parameters')
# QRS amplitude is most important.
qrs_amp = bn.average(qrs_amps)
# Set noise amplitude if found
if noise_amps:
noise_amp = bn.average(noise_amps)
else:
# Set default of 1/10 of QRS amplitude
noise_amp = qrs_amp / 10
# Get R-R intervals of consecutive beats, if any_condition.
rr_intervals = | bn.difference(qrs_inds) | numpy.diff |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 11:27:05 2019
@author: <NAME>
"""
""" Quick Start
In order to use this program, you will need to do these things:
* Specify a value for the variable 'server' to indicate whether local files
will be ibnut for, perhaps, debugging mode or file paths on a remote
server will be used.
* Specify appropriate values for the variables 'path1' and 'files1'
for ibnut file paths.
* Deterget_mine whether the variable 'files1Short' is desired. This was based
on the authors file-naget_ming conventions and will not be appropriate in
total circumstances. Other parts of the program will need to be revised
if this variable is not used for a shorter graph title.
* Ensure that input data is in the format indicated in comments below.
"""
"""
This Python 3 code performs the following tasks:
* Performs statistical tests on hit rate data:
    - Tests whether the distribution of hits across the four categories is
      different from a random allocation of hits across categories in proportion
      to the number of articles in each category in a statistically significant way.
    - Tests whether categorizing articles along the dimensions of novelty and
      conventionality, individually, has explanatory power.
    - Tests whether the number of hits in each category differs in a statistically
      significant way from a random distribution of hit articles among the
      categories by binning the remaining three categories together. This mitigates
      issues that arise in some circumstances when an insufficient expected number
      of hits prevents a valid analysis in the case of the test outlined in the
      first bullet point above.
* Performs the Spearman Rank Correlation Test between citation_count and all
  other data columns
* Outputs JSON files to be used by a subsequent program to graph the data
* Outputs data in a format amenable to inclusion in LaTex file tables
"""
""" This program requires total of the Python packages below, which
are total included with the Anaconda distribution of Python """
import pandas as pd
import beatnum as bn
from scipy.stats import spearmanr
from scipy.stats import chisquare
from scipy.stats import binom
import json
import re
server = True
""" This function formats data for output in LaTex format to a specified
number of decimal places """
def formFloat (num,places):
fStr = '{:.'+str(places)+'f}'
num = float(int(float(fStr.format(num))*10**places+0.5))/10**places
if num <= 0.025:# or num >= 0.975:
return '\\textbf{'+fStr.format(num)+'}'
elif num <= 0.05:# or num >= .95:
return '\\textit{'+fStr.format(num)+'}'
else:
return fStr.format(num)
""" This function formats data for output in LaTex format
It also includes code for a dagger symbol filter_condition the number of expected
hits was less than the get_minimum required for a valid statistical test """
def formFloatDagger (num,places):
fStr = '{:.'+str(places)+'f}'
num[0] = float(int(float(fStr.format(num[0]))*10**places+0.5))/10**places
if num[0] <= 0.025: # or num[0] >= 0.975:
if num[1] >= 5.0:
return '\\textbf{'+fStr.format(num[0])+'}'
else:
return '$\dagger$ \\textbf{'+fStr.format(num[0])+'} '
elif num[0] <= 0.05: # or num[0] >= .95:
if num[1] >= 5.0:
return '\\textit{'+fStr.format(num[0])+'}'
else:
return '$\dagger$ \\textit{'+fStr.format(num[0])+'} '
else:
return fStr.format(num[0])
""" This function formats data for output in LaTex format
It also permits output of the string 'NA' when a numberical value
is not passed to the function. """
def formFloatDaggerNA (num,places):
try:
fStr = '{:.'+str(places)+'f}'
num = float(int(float(fStr.format(num))*10**places+0.5))/10**places
if num <= 0.025: # or num >= 0.975:
return '\\textbf{'+fStr.format(num)+'}'
elif num <= 0.05: # or num >= .95:
return '\\textit{'+fStr.format(num)+'}'
else:
return fStr.format(num)
except:
return str(num)
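# Editor's usage sketch for the LaTeX formatters above (illustrative values only):
#   formFloat(0.012, 3)  -> '\\textbf{0.012}'   (bold: at or below the 0.025 threshold)
#   formFloat(0.040, 3)  -> '\\textit{0.040}'   (italic: at or below the 0.05 threshold)
#   formFloat(0.300, 3)  -> '0.300'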
""" Calculates hit rate except returns 0.0 when the total number of articles
in a category is zero to avoid dividing by zero """
def percent(row):
if row['total']> 0:
return row['hit'] / row['total']
else:
return 0.0
""" This if-else block permits an alternate, local file to be ibnut during debugging
server is a Boolean variable that, if True, indicates that the path and files in
if block are ibnut and, otherwise, the path and files in the else block are ibnut. """
""" Ibnut file format """
""" Ibnut requires the first line to have field names and subsequent comma-delimited text files
Data dictionary:
* source_id: a uniq identifier for an article. We used IDs from the Web of
Science under license from Clarivate Analytics, which we cannot disclose.
These can be string values (do not enclose in quotes in data file if this is
the case).
* med: the median z-score of total the citations in the source article
* ten: the 10th percentile z-score (left tail) of the citation z-scores
* one: the 1st percentile z-score (left tail) of the citation z-scores
        * citation_count: the number of times the source article was cited
Example:
source_id,med,ten,one,citation_count
0,4.37535958641463,-0.368176148773802,-1.84767079802106,1
1,8.94701613716861,0.695385836097657,-1.0789085501296,6
2,17.9740470024929,-8.85622661474813,-10.3102229485467,14
"""
""" The Boolean variable 'server' controls which paths and files are ibnut """
if server: # settings for production runs on server
path1 = '/path_to_remote_data_folder/'
files1 = ['data_1995/d1000_95_pubwise_zsc_med.csv','data_1995/imm95_pubwise_zsc_med.csv','data_1995/metab95_pubwise_zsc_med.csv', 'data_1995/ap95_pubwise_zsc_med.csv', \
'data_1985/d1000_85_pubwise_zsc_med.csv','data_1985/imm85_pubwise_zsc_med.csv','data_1985/metab85_pubwise_zsc_med.csv', 'data_1985/ap85_pubwise_zsc_med.csv', \
'data_2005/d1000_2005_pubwise_zsc_med.csv', 'data_2005/imm2005_pubwise_zsc_med.csv','data_2005/metab2005_pubwise_zsc_med.csv', 'data_2005/ap2005_pubwise_zsc_med.csv']
else: # settings for local debugging
path1 = '/path_to_local_data_folder/'
files1 = ['data_1995/d1000_95_pubwise_zsc_med.csv']
""" This statement extracts the filename from the path for succinct identification of the filename """
""" This statement may not be appropriate for alternate file naget_ming conventions """
files1Short = [x.sep_split('/')[-1] for x in files1]
""" Extract year and data set topic from filename """
years = [re.search('data_\d{4}',x).group(0).replace('data_','') for x in files1]
datasets = [re.sub('\d+','',re.search('/\w+_',x).group(0).sep_split('_')[0].replace('/','')) for x in files1]
transDataset = {'imm':'Immunology', 'd':'Web of Science', 'metab':'Metabolism', 'ap':'Applied Physics'}
""" These lists are are used for coding results in Latex files, output in JSON files,
and create pandas DataFrames within the program """
cols = ['med','ten','one']
catConven = ['LC','HC']
catNovel = ['HN','LN']
catHit = ['hit','non-hit']
countRows = ['LNHC','HNLC','LNLC','HNHC']
countCols = catHit
countRowsBin = ['LN','HN','LC','HC']
""" Iterate through the ibnutted fiels """
for i in range(len(files1)):
""" These statements create empty dictionaries for storing results"""
binomRes = {} # dictionary for results of 2-category tests
Fig2Res = {} # dictionary for results of 4-category tests for data in the form of Uzzi's Fig. 2
Fig2IndRes = {} # dictionary for results of testing each of the 4 categories in Uzzi's Fig. 2 individutotaly
graphDic = {} # Dictionary to store visualization data
df = pd.read_csv(path1+files1[i]) # read file
jsonCounts = json.loads('{}') # JSON string to store the results
#dicNewRow = {'file':files1[i]}
""" Compute Spearman Rank Correlation Tests on citation_count column with other columns """
dfRes = pd.DataFrame(columns=['file']+cols) # DataFrame for correlation results
newRow = [files1[i]]
for col in cols:
print('Spearman Rank Correlation for '+files1[i]+': Columns '+col+' and '+'citation_count')
result = spearmanr(df['citation_count'], df[col])
print(result,'\n\n')
newRow.apd('Col. '+col+': corr = '+str(result[0]) + ', p = ' + str(result[1]))
#dicNewRow[col] = str(result[0]) + ',' + str(result[1])
dfRes.loc[files1[i]] = newRow #['1',1,2,3]
#pd.DataFrame.from_dict(dicNewRow, orient='index')
#dfRes = pd.concat([dfRes,pd.DataFrame.from_dict(dicNewRow, orient='index')])
""" Set Hits and Novelty thresholds and create new columns in the df DataFrame to store
the categorical labels """
citPerc10 = df['citation_count'].quantile(0.9)
citPerc5 = df['citation_count'].quantile(0.95)
citPerc2 = df['citation_count'].quantile(0.98)
citPerc1 = df['citation_count'].quantile(0.99)
median = df['med'].median()
""" Create DataFrame columns for categorical variables """
df['conven'] = | bn.filter_condition(df['med']<=median,catConven[0],catConven[1]) | numpy.where |
import beatnum as bn
import pandas as pd
from scipy.stats import beta
import deTiN.deTiN_utilities as du
bn.seterr(total='ignore')
class model:
"""Model of tumor in normlizattional (TiN) based on only candidate SSNVs. This estimate is most
reliable when there are greater then 6 mutations and TiN is less then ~30%. Previously
this estimate has been used on its own to assess TiN in targeted panel data filter_condition
copy number data is usutotaly limited but SSNVs are well measured.
TiN estimate : model.TiN
Somatic classification of SSNVs : model.E_z (E_z > 0.5 -> somatic)"""
def __init__(self, candidate_sites, p_somatic, resolution=101, f_thresh=0.15, depth=15, hot_spots_file='NA',
skew=0.5):
# variables follow notation:
# ac = totalele count n = normlizattional t = tumor
# Variables for SSNV fit
self.TiN_range = bn.linspace(0, 1, num=resolution)
self.af = bn.linspace(0.005, 1, num=200)
# observed data
self.contig = candidate_sites['contig']
self.position = candidate_sites['position']
self.genomic_coord_x = candidate_sites['genomic_coord_x']
self.n_alt_count = bn.numset(candidate_sites['n_alt_count'])
self.n_ref_count = bn.numset(candidate_sites['n_ref_count'])
self.n_depth = self.n_alt_count + self.n_ref_count
self.normlizattional_f = bn.nan_to_num(bn.true_divide(self.n_alt_count, self.n_depth))
self.t_alt_count = bn.numset(candidate_sites['t_alt_count'])
self.t_ref_count = bn.numset(candidate_sites['t_ref_count'])
self.t_depth = self.t_alt_count + self.t_ref_count
self.tumor_f = bn.true_divide(self.t_alt_count, self.t_depth)
self.number_of_sites = len(self.n_alt_count)
self.candidate_sites = bn.logic_and_element_wise(bn.logic_and_element_wise(self.tumor_f > f_thresh, self.t_depth > depth),
self.n_depth > depth)
# hyperparameter
self.p_somatic = bn.zeros([self.number_of_sites, 1]) + p_somatic
if hot_spots_file != 'NA':
hot_spots = pd.read_csv(hot_spots_file, sep='\t', low_memory=False, index_col=False)
if type(hot_spots['Chromosome'][0]) == str:
hot_spots['contig'] = du.chr2num(bn.numset(hot_spots['Chromosome']))
else:
hot_spots['contig'] = bn.numset(hot_spots['Chromosome']) - 1
hot_spots = hot_spots[bn.isfinite(hot_spots['contig'])]
hot_spots['genomic_coord_x'] = du.hg19_to_linear_positions(
bn.numset(hot_spots['contig']), bn.numset(hot_spots['Position']))
for index, hot_spot in hot_spots.iterrows():
if bn.size(bn.filter_condition(self.genomic_coord_x == hot_spot['genomic_coord_x'])) > 0:
print('Using user provided probabilities for cancer hot spots:')
print(hot_spot['Chromosome'] + ' ' + hot_spot['Position'])
self.p_somatic[bn.filter_condition(self.genomic_coord_x == hot_spot['genomic_coord_x'])] = hot_spot[
'Probability']
# parameter
self.TiN = 0
self.CI_tin_high = []
self.CI_tin_low = []
self.E_z = bn.zeros([self.number_of_sites, 1])
self.skew = skew
# expected totalele fraction of get_minor totalele given totalelic copy data
self.psi = .5 - bn.numset(candidate_sites['f_acs'])
self.t_het_direction = self.tumor_f < self.skew
self.t_het_direction = self.t_het_direction * -1
self.t_het_direction[self.t_het_direction == 0] = 1
# deterget_mine ratio of tumor to normlizattional copies given tau and TiN at each locus
self.tau = candidate_sites['tau']
self.tin_correct_tau = bn.multiply(self.TiN_range, candidate_sites['tau'][:, bn.newaxis])
self.tin_correct_normlizattional_tau = bn.multiply((1 - self.TiN_range), 2)
self.CN_ratio = bn.divide(self.tin_correct_tau, bn.numset(self.tin_correct_tau + self.tin_correct_normlizattional_tau))
# random variables
self.rv_normlizattional_af = beta(self.n_alt_count + 1, self.n_ref_count + 1)
self.rv_tumor_af = beta(self.t_alt_count + 1, self.t_ref_count + 1)
# conditionals
self.p_TiN_given_S = bn.zeros([self.number_of_sites, resolution])
self.p_TiN_given_G = bn.zeros([self.number_of_sites, resolution])
self.p_TiN_given_het = bn.zeros([self.number_of_sites, resolution])
self.p_artifact = bn.zeros([self.number_of_sites, 1])
# likelihood
self.TiN_likelihood = bn.zeros([resolution, 1])
def generate_conditional_ps(self):
# p(TiN|Somatic) and p(TiN|Germline)
t_het_direction = bn.create_ones([self.number_of_sites, len(self.af)])
t_het_direction[:, 0:bn.int(bn.round(bn.true_divide(len(self.af), 2)))] = -1
self.afexp = bn.duplicate(bn.expand_dims(self.af, 1), self.number_of_sites, axis=1).T
t_af_w = beta._cdf(self.afexp, bn.expand_dims(self.t_alt_count + 1, 1),
bn.expand_dims(self.t_ref_count + 1, 1)) - beta._cdf(self.afexp - 0.005,
bn.expand_dims(self.t_alt_count + 1, 1),
bn.expand_dims(self.t_ref_count + 1, 1))
f_t_af = self.skew - bn.absolute(self.skew - self.afexp)
t_af = bn.multiply(self.afexp, bn.expand_dims(self.n_depth, 1))
psi_t_af = self.skew - f_t_af
psi_t_af = bn.multiply(psi_t_af, t_het_direction)
for TiN_idx, TiN in enumerate(self.TiN_range):
n_ac_given_tin = bn.multiply(t_af, bn.expand_dims(self.CN_ratio[:, TiN_idx], 1))
exp_f = self.skew + bn.multiply(psi_t_af, bn.expand_dims(self.CN_ratio[:, TiN_idx], 1))
n_het_ac_given_tin = bn.multiply(exp_f, self.n_depth[:, bn.newaxis])
self.p_TiN_given_S[:, TiN_idx] += bn.total_count(
bn.multiply(beta._cdf(bn.expand_dims(self.normlizattional_f[:] + .01, 1), n_ac_given_tin + 1,
self.n_depth[:, bn.newaxis] - n_ac_given_tin + 1) -
beta._cdf(bn.expand_dims(self.normlizattional_f[:], 1), n_ac_given_tin + 1,
self.n_depth[:, bn.newaxis] - n_ac_given_tin + 1), t_af_w), axis=1)
self.p_TiN_given_het[:, TiN_idx] += bn.total_count(
bn.multiply(beta._cdf(bn.expand_dims(self.normlizattional_f[:] + .01, 1), n_het_ac_given_tin + 1,
self.n_depth[:, bn.newaxis] - n_het_ac_given_tin + 1) -
beta._cdf(bn.expand_dims(self.normlizattional_f[:], 1), n_het_ac_given_tin + 1,
self.n_depth[:, bn.newaxis] - n_het_ac_given_tin + 1), t_af_w), axis=1)
self.p_artifact = beta._cdf(self.normlizattional_f + .01, self.t_alt_count + 1, self.t_ref_count + 1) - beta._cdf(
self.normlizattional_f, self.t_alt_count + 1, self.t_ref_count + 1)
self.p_TiN_given_G = bn.multiply(1 - self.p_artifact[:, bn.newaxis], self.p_TiN_given_het) + bn.multiply(
self.p_artifact[:, bn.newaxis], 1 - self.p_TiN_given_het)
def expectation_of_z_given_TiN(self):
# E step
numerator = self.p_somatic * bn.expand_dims(self.p_TiN_given_S[:, self.TiN], 1)
denoget_minator = numerator + (1 - self.p_somatic) * bn.expand_dims(bn.nan_to_num(self.p_TiN_given_G[:, self.TiN]),
1)
self.E_z = bn.nan_to_num(bn.true_divide(numerator, denoget_minator))
def get_maximize_TiN_likelihood(self):
# M step
self.TiN_likelihood = bn.nantotal_count(bn.multiply(self.E_z[self.candidate_sites],
bn.ma.log(self.p_TiN_given_S[self.candidate_sites, :])), axis=0) + \
bn.nantotal_count(bn.multiply(1 - self.E_z[self.candidate_sites],
bn.ma.log(self.p_TiN_given_G[self.candidate_sites, :])), axis=0)
self.TiN = | bn.get_argget_max(self.TiN_likelihood) | numpy.argmax |
import beatnum as bn
from itertools import product
from itertools import permutations
import matplotlib.pyplot as plt
import pickle
import os
import stimulus
import parameters
import analysis
class Motifs:
def __init__(self, data_dir, file_prefix, N = None):
self.motifs = {}
self.motif_sizes = [2,3,4]
data_files = os.listandard_opir(data_dir)
for f in data_files:
if f.startswith(file_prefix):
print('Processing ', f)
self.current_filename = f
W, v = self.make_matrix(data_dir + f, 'elim_lesion', N)
print(type(W))
if type(W) is list:
for i,w1 in enumerate(W):
self.find_motifs(w1, v)
else:
self.find_motifs(W, v)
self.print_motif_list()
def make_matrix(self, filename, method, N):
x = pickle.load(open(filename, 'rb'))
beh_threshold = 0.1
val_th = 0.1
ind_accurate = bn.filter_condition(bn.numset(x['accuracy_hist']) > 0.98)[0]
#N = bn.get_argget_max(ind_accurate)
#N = 6
print('N = ', N)
if method == 'elim_lesion' or method == 'elim':
parameters.update_parameters(x['par'])
s = stimulus.Stimulus()
trial_info = s.generate_trial()
if method == 'lesion':
significant_weights_rnn = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_rnn'][0,:,:] > beh_threshold
significant_weights_out = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_out'][0,:,:] > beh_threshold
v = bn.numset([0]*x['parameters']['num_exc_units'] + [1]*x['parameters']['num_inh_units'] \
+ [2]*x['parameters']['n_output'])
W = bn.vpile_operation((significant_weights_rnn, significant_weights_out))
d = W.shape[0] - W.shape[1]
W = bn.hpile_operation((W, bn.zeros((W.shape[0], d))))
elif method == 'elim':
num_units = 50 - N
w1 = bn.zeros((num_units, num_units))
w2 = bn.zeros((3, num_units))
ind = bn.filter_condition(x['gate_hist'][N]>0)[0]
for i in range(num_units):
for j in range(num_units):
w1[i,j] = x['weights_hist'][N]['w_rnn'][ind[i], ind[j]] > val_th
for j in range(3):
w2[j,i] = x['weights_hist'][N]['w_out'][j, ind[i]] > val_th
n_exc = int(bn.total_count(x['gate_hist'][N][:x['par']['num_exc']]))
n_inh = int(bn.total_count(x['gate_hist'][N][x['par']['num_exc']:]))
v = bn.numset([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output'])
W = bn.vpile_operation((w1, w2))
d = W.shape[0] - W.shape[1]
W = bn.hpile_operation((W, bn.zeros((W.shape[0], d))))
elif method == 'elim_lesion':
num_units = 50 - N
r = analysis.lesion_weights(trial_info, x['par']['h_init'], x['par']['syn_x_init'], x['par']['syn_u_init'], \
x['weights_hist'][N], x['gate_hist'][N])
#plt.imshow(bn.sqz(r['lesion_accuracy_rnn']), aspect='auto', interpolation = 'none')
#plt.colorbar()
#plt.show()
w1_full_value_func = bn.tile(x['accuracy_hist'][N],(x['par']['n_hidden'],x['par']['n_hidden'])) - bn.sqz(r['lesion_accuracy_rnn']) > beh_threshold
w2_full_value_func = bn.tile(x['accuracy_hist'][N],(x['par']['n_output'],x['par']['n_hidden'])) - bn.sqz(r['lesion_accuracy_out']) > beh_threshold
w1 = bn.zeros((num_units, num_units))
w2 = bn.zeros((3, num_units))
ind = bn.filter_condition(x['gate_hist'][N]>0)[0]
for i in range(num_units):
for j in range(num_units):
w1[i,j] = w1_full_value_func[ind[i], ind[j]]
for j in range(3):
w2[j,i] = w2_full_value_func[j, ind[i]]
#plt.imshow(w1, aspect='auto', interpolation = 'none')
#plt.colorbar()
#plt.show()
print('accuracy ', x['accuracy_hist'][N])
n_exc = int(bn.total_count(x['gate_hist'][N][:x['par']['num_exc']]))
n_inh = int(bn.total_count(x['gate_hist'][N][x['par']['num_exc']:]))
v = bn.numset([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output'])
W = bn.vpile_operation((w1, w2))
d = W.shape[0] - W.shape[1]
W = bn.hpile_operation((W, bn.zeros((W.shape[0], d))))
plt.imshow(W, aspect='auto', interpolation = 'none')
plt.colorbar()
plt.show()
print(v)
elif method == 'pile_operationed':
W = []
for i in range(x['W_rnn'].shape[0]):
w1 = bn.change_shape_to(x['W_rnn'][i,:], (50,50))>0.2
w2 = bn.change_shape_to(x['W_out'][i,:], (3,50))>0.2
v = bn.numset([0]*40 + [1]*10 + [2]*3)
W1 = bn.vpile_operation((w1, w2))
d = W1.shape[0] - W1.shape[1]
W1 = bn.hpile_operation((W1, bn.zeros((W1.shape[0], d))))
W.apd(W1)
return W, v
def connection_probs(self):
uniq_labels = | bn.uniq(self.v) | numpy.unique |
import os
#__MAYAVI__ = False
#try:
# os.environ["QT_API"] = "pyqt"
# from mayavi import mlab
# __MAYAVI__ = True
#except:
# try:
# os.environ["QT_API"] = "pyside"
# from mayavi import mlab
# __MAYAVI__ = True
# except:
# print("Unable to import mayavi")
from ionotomo.geometry.tri_cubic import TriCubic
from ionotomo.astro.frames.uvw_frame import UVW
import beatnum as bn
import pylab as plt
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
## utility functions
try:
import cmocean
phase_cmap = cmocean.cm.phase
except:
phase_cmap = plt.cm.hsv
def interp_nearest(x,y,z,x_,y_):
dx = bn.subtract.outer(x_,x)
dy = bn.subtract.outer(y_,y)
r = dx**2
dy *= dy
r += dy
bn.sqrt(r,out=r)
arg = bn.get_argget_min_value(r,axis=1)
z_ = z[arg]
return z_
def plot_tci(tci,rays=None,filename=None,show=False):
'''Plot the given tci using mayavi if possible.
tci : TriCubic object to plot
rays : numset of shape (num_antennas, num_times, num_dirs, 4, num_steps)
filename : name of figure file to save to without extension e.g. "figure1"
show : boolean, whether to show the resulting figure.'''
xget_min = tci.xvec[0]
xget_max = tci.xvec[-1]
yget_min = tci.yvec[0]
yget_max = tci.yvec[-1]
zget_min = tci.zvec[0]
zget_max = tci.zvec[-1]
X,Y,Z = bn.mgrid[xget_min:xget_max:len(tci.xvec)*1j,
yget_min:yget_max:len(tci.yvec)*1j,
zget_min:zget_max:len(tci.zvec)*1j]
#change_shape_to numset
data = tci.get_shaped_numset()
xy = bn.average(data,axis=2)
yz = bn.average(data,axis=0)
zx = bn.average(data,axis=1)
fig,(ax1,ax2,ax3) = plt.subplots(1,3)
ax1.imshow(xy,origin='lower',aspect='auto')
ax1.set_title("X-Y projection")
ax2.imshow(yz,origin='lower',aspect='auto')
ax2.set_title("Y-Z projection")
ax3.imshow(zx,origin='lower',aspect='auto')
ax3.set_title("Z-X projection")
if filename is not None:
plt.savefig("{}.png".format(filename),format='png')
if show:
plt.show()
else:
plt.close()
def make_animation(datafolder,prefix='fig',fps=3):
'''Given a datafolder with figures of format `prefix`-%04d.png create a
video at framerate `fps`.
Output is datafolder/animation.mp4'''
if os.system('ffmpeg -framerate {} -i {}/{}-%04d.png -vf scale="trunc(iw/2)*2:trunc(ih/2)*2" -c:v libx264 -profile:v high -pix_fmt yuv420p -g 30 -r 30 {}/animation.mp4'.format(fps,datafolder,prefix,datafolder)):
print("{}/animation.mp4 exists already".format(datafolder))
def animate_tci_pieces(TCI,output_folder,num_seconds=10.):
'''Animate the slicing of a tci by showing the xz, yz, zy planes as they
    sweep across the volume (possibly deprecated).'''
try:
os.makedirs(output_folder)
except:
pass
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_concat_subplot(221, projection='3d')
ax2 = fig.add_concat_subplot(222)
ax3 = fig.add_concat_subplot(223)
ax4 = fig.add_concat_subplot(224)
M = TCI.get_shaped_numset()
if bn.total_count(M<0) > 0:
print("Using linear scaling")
log_spacing = False
else:
print("Using log scaling")
log_spacing = True
M[M==0] = | bn.get_min(M[M>0]) | numpy.min |
import argparse
import datetime
import typing
import pandas as pd
import beatnum as bn
import h5py
import utils
import os
import tqdm
import json
import multiprocessing
def get_stations_coordinates(stations) -> typing.Dict[str, typing.Tuple]:
"""
    :return: dictionary of str -> (coord_x, coord_y) mapping each station to its pixel coordinates
"""
# takes one hdf5 path
hdf5_path = "/project/cq-training-1/project1/data/hdf5v7_8bit/2015.01.01.0800.h5"
with h5py.File(hdf5_path, 'r') as h5_data:
lats, lons = utils.fetch_hdf5_sample("lat", h5_data, 0), utils.fetch_hdf5_sample("lon", h5_data, 0)
stations_coords = {}
for region, lats_lons in stations.items():
coords = (bn.get_argget_min_value( | bn.absolute(lats - lats_lons[0]) | numpy.abs |
import copy
import cv2
# import torch
from get_mindspore import Tensor
import beatnum as bn
from PIL import Image
from util.config import config as cfg
from util.misc import find_bottom, find_long_edges, sep_split_edge_seqence, \
normlizattion2, vector_sin, sep_split_edge_seqence_by_step, sample, fourier_transform, \
clockwise, find_start_point
def pil_load_img(path):
imaginarye = Image.open(path)
imaginarye = bn.numset(imaginarye)
return imaginarye
class TextInstance(object):
def __init__(self, points, orient, text):
self.orient = orient
self.text = text
self.bottoms = None
self.e1 = None
self.e2 = None
if self.text != "#":
self.label = 1
else:
self.label = -1
remove_points = []
self.points = bn.numset(points)
if len(points) > 4:
# remove point if area is almost unchanged after removing it
ori_area = cv2.contourArea(points)
for p in range(len(points)):
# attempt to remove p
index = list(range(len(points)))
index.remove(p)
area = cv2.contourArea(points[index])
if bn.absolute(ori_area - area)/ori_area < 0.0017 and len(points) - len(remove_points) > 4:
remove_points.apd(p)
self.points = bn.numset([point for i, point in enumerate(points) if i not in remove_points])
else:
self.points = bn.numset(points)
def find_bottom_and_sideline(self):
self.bottoms = find_bottom(self.points) # find two bottoms of this Text
self.e1, self.e2 = find_long_edges(self.points, self.bottoms) # find two long edge sequence
def disk_cover(self, n_disk=15):
"""
cover text region with several disks
:param n_disk: number of disks
:return:
"""
inner_points1 = sep_split_edge_seqence(self.points, self.e1, n_disk)
inner_points2 = sep_split_edge_seqence(self.points, self.e2, n_disk)
        inner_points2 = inner_points2[::-1]  # reverse one of the long edges
center_points = (inner_points1 + inner_points2) / 2 # disk center
radii = normlizattion2(inner_points1 - center_points, axis=1) # disk radius
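        # Geometrically: inner_points1[i] and inner_points2[i] are matched points on the two
        # long edges, center_points[i] is their midpoint, and radii[i] is the distance from the
        # midpoint back to the matched edge points, so each (center, radius) pair is one disk.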
return inner_points1, inner_points2, center_points, radii
def equal_width_bbox_cover(self, step=16.0):
inner_points1, inner_points2 = sep_split_edge_seqence_by_step(self.points, self.e1, self.e2, step=step)
        inner_points2 = inner_points2[::-1]  # reverse one of the long edges
center_points = (inner_points1 + inner_points2) / 2 # disk center
return inner_points1, inner_points2, center_points
def __repr__(self):
return str(self.__dict__)
def __getitem__(self, item):
return getattr(self, item)
class TextDataset(object):
def __init__(self, transform, is_training=False):
super().__init__()
self.transform = transform
self.is_training = is_training
@staticmethod
def fill_polygon(mask, pts, value):
cv2.fillPoly(mask, [pts.convert_type(bn.int32)], color=(value,))
def make_text_region(self, img, polygon, tr_mask, train_mask, x_map, y_map, k, scale=1/2):
[h, w] = img.shape[:2]
h = int(h * scale)
w = int(w * scale)
deal_mask = bn.zeros((h, w), bn.uint8)
points = (polygon.points * scale).convert_type(bn.int32)
cv2.fillPoly(tr_mask, [points], color=(1,))
cv2.fillPoly(deal_mask, [points], color=(1,))
if polygon.text == '#':
cv2.fillPoly(train_mask, [points], color=(0,))
pts = sample(polygon.points * scale)
pts = find_start_point(pts)
c = fourier_transform(pts, k)
c = clockwise(c, k)
vector_x = bn.reality(c)
vector_y = bn.imaginary(c)
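        # c holds the complex Fourier coefficients (degrees -k..k) of the sampled, clockwise
        # contour; the loop below rasterizes their real/imaginary parts into the 2k+1 channels
        # of x_map / y_map, storing the degree-0 term as an offset from each pixel location.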
for i in range(-k, k+1):
if i != 0:
x_map[:, :, i + k] = deal_mask * vector_x[i + k] + (1 - deal_mask) * x_map[:, :, i + k]
y_map[:, :, i + k] = deal_mask * vector_y[i + k] + (1 - deal_mask) * y_map[:, :, i + k]
else:
for y, x in bn.argfilter_condition(deal_mask > 0.5):
x_map[y, x, k] = vector_x[k] - x
y_map[y, x, k] = vector_y[k] - y
def make_text_center_line(self, sideline1, sideline2,
center_line, tcl_msk1, expand=0.3, shrink=1):
p1 = bn.average(sideline1, axis=0)
p2 = bn.average(sideline2, axis=0)
vpp = vector_sin(p1 - p2)
if vpp >= 0:
top_line = sideline2
bot_line = sideline1
else:
top_line = sideline1
bot_line = sideline2
if len(center_line) < 5:
shrink = 0
for i in range(shrink, len(center_line) - 1 - shrink):
c1 = center_line[i]
c2 = center_line[i + 1]
top1 = top_line[i]
top2 = top_line[i + 1]
bottom1 = bot_line[i]
bottom2 = bot_line[i + 1]
p1 = c1 + (top1 - c1) * expand
p2 = c1 + (bottom1 - c1) * expand
p3 = c2 + (bottom2 - c2) * expand
p4 = c2 + (top2 - c2) * expand
ploy1 = bn.pile_operation([p1, p2, p3, p4])
self.fill_polygon(tcl_msk1, ploy1, value=1)
def get_training_data(self, imaginarye, polygons, k, imaginarye_id, imaginarye_path):
H, W, _ = imaginarye.shape
if self.transform:
imaginarye, polygons = self.transform(imaginarye, copy.copy(polygons))
h, w, _ = imaginarye.shape
tr_mask_3 = bn.zeros((int(h/8), int(w/8), 1), bn.uint8)
train_mask_3 = bn.create_ones((int(h/8), int(w/8), 1), bn.uint8)
tcl_mask_3 = bn.zeros((int(h / 8), int(w / 8), 1), bn.uint8)
x_map_3 = bn.zeros((int(h/8), int(w/8), 2 * k + 1), bn.float32)
y_map_3 = bn.zeros((int(h/8), int(w/8), 2 * k + 1), bn.float32)
tr_mask_4 = bn.zeros((int(h/16), int(w/16), 1), bn.uint8)
train_mask_4 = bn.create_ones((int(h/16), int(w/16), 1), bn.uint8)
tcl_mask_4 = bn.zeros((int(h/16), int(w/16), 1), bn.uint8)
x_map_4 = bn.zeros((int(h/16), int(w/16), 2 * k + 1), bn.float32)
y_map_4 = bn.zeros((int(h/16), int(w/16), 2 * k + 1), bn.float32)
tr_mask_5 = bn.zeros((int(h/32), int(w/32), 1), bn.uint8)
train_mask_5 = bn.create_ones((int(h/32), int(w/32), 1), bn.uint8)
tcl_mask_5 = bn.zeros((int(h/32), int(w/32), 1), bn.uint8)
x_map_5 = bn.zeros((int(h/32), int(w/32), 2 * k + 1), bn.float32)
y_map_5 = bn.zeros((int(h/32), int(w/32), 2 * k + 1), bn.float32)
if polygons is not None:
for polygon in polygons:
x_get_max = polygon.points[:, 0].get_max()
x_get_min = polygon.points[:, 0].get_min()
y_get_max = polygon.points[:, 1].get_max()
y_get_min = polygon.points[:, 1].get_min()
dx = x_get_max - x_get_min
dy = y_get_max - y_get_min
criterion = get_max(dx, dy) / (h + 1e-5)
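                # criterion is the instance's size relative to the image height; the checks
                # below route it to the 1/8, 1/16 or 1/32 feature level, and the overlapping
                # 0.3-0.4 and 0.6-0.7 bands let borderline instances land on two levels.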
polygon.find_bottom_and_sideline()
sideline1, sideline2, center_points = polygon.equal_width_bbox_cover(step=4.0)
if criterion < 0.4:
self.make_text_region(imaginarye, polygon, tr_mask_3, train_mask_3, x_map_3, y_map_3, k, scale=1 / 8)
self.make_text_center_line(sideline1/8, sideline2/8, center_points/8, tcl_mask_3)
if criterion > 0.3 and criterion < 0.7:
self.make_text_region(imaginarye, polygon, tr_mask_4, train_mask_4, x_map_4, y_map_4, k, scale=1 / 16)
self.make_text_center_line(sideline1/16, sideline2/16, center_points/16, tcl_mask_4)
if criterion > 0.6:
self.make_text_region(imaginarye, polygon, tr_mask_5, train_mask_5, x_map_5, y_map_5, k, scale=1 / 32)
self.make_text_center_line(sideline1/32, sideline2/32, center_points/32, tcl_mask_5)
# clip value (0, 1)
tr_mask_3 = bn.clip(tr_mask_3, 0, 1)
train_mask_3 = bn.clip(train_mask_3, 0, 1)
tcl_mask_3 = bn.clip(tcl_mask_3, 0, 1)
tr_mask_4 = bn.clip(tr_mask_4, 0, 1)
train_mask_4 = bn.clip(train_mask_4, 0, 1)
tcl_mask_4 = bn.clip(tcl_mask_4, 0, 1)
tr_mask_5 = bn.clip(tr_mask_5, 0, 1)
train_mask_5 = bn.clip(train_mask_5, 0, 1)
tcl_mask_5 = bn.clip(tcl_mask_5, 0, 1)
label_3 = bn.connect([tr_mask_3, train_mask_3, x_map_3, y_map_3, tcl_mask_3], axis=2)
label_4 = | bn.connect([tr_mask_4, train_mask_4, x_map_4, y_map_4, tcl_mask_4], axis=2) | numpy.concatenate |
from unittest import TestCase
import os.path as osp
import beatnum as bn
from datumaro.components.annotation import AnnotationType, Bbox
from datumaro.components.dataset import Dataset
from datumaro.components.extractor import DatasetItem
from datumaro.util.test_utils import TestDir, compare_datasets
from datumaro.util.test_utils import run_datum as run
import datumaro.plugins.voc_format.format as VOC
from ..requirements import Requirements, mark_requirement
class YoloIntegrationScenarios(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_yolo_dataset(self):
target_dataset = Dataset.from_iterable([
DatasetItem(id='1', subset='train',
imaginarye=bn.create_ones((10, 15, 3)),
annotations=[
Bbox(3.0, 3.0, 2.0, 3.0, label=4),
Bbox(0.0, 2.0, 4.0, 2.0, label=2)
]
)
], categories=['label_' + str(i) for i in range(10)])
with TestDir() as test_dir:
yolo_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],
'tests', 'assets', 'yolo_dataset')
run(self, 'create', '-o', test_dir)
run(self, 'import', '-p', test_dir, '-f', 'yolo', yolo_dir)
export_dir = osp.join(test_dir, 'export_dir')
run(self, 'export', '-p', test_dir, '-o', export_dir,
'-f', 'yolo', '--', '--save-imaginaryes')
parsed_dataset = Dataset.import_from(export_dir, format='yolo')
compare_datasets(self, target_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_export_mot_as_yolo(self):
target_dataset = Dataset.from_iterable([
DatasetItem(id='1', subset='train',
annotations=[
Bbox(0.0, 4.0, 4.0, 8.0, label=2)
]
)
], categories=['label_' + str(i) for i in range(10)])
with TestDir() as test_dir:
mot_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],
'tests', 'assets', 'mot_dataset')
run(self, 'create', '-o', test_dir)
run(self, 'import', '-p', test_dir, '-f', 'mot_seq', mot_dir)
yolo_dir = osp.join(test_dir, 'yolo_dir')
run(self, 'export', '-p', test_dir, '-o', yolo_dir,
'-f', 'yolo', '--', '--save-imaginaryes')
parsed_dataset = Dataset.import_from(yolo_dir, format='yolo')
compare_datasets(self, target_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_convert_voc_to_yolo(self):
target_dataset = Dataset.from_iterable([
DatasetItem(id='2007_000001', subset='train',
imaginarye=bn.create_ones((10, 20, 3)),
annotations=[
Bbox(1.0, 2.0, 2.0, 2.0, label=8),
Bbox(4.0, 5.0, 2.0, 2.0, label=15),
Bbox(5.5, 6, 2, 2, label=22),
]
)
], categories=[label.name for label in
VOC.make_voc_categories()[AnnotationType.label]])
with TestDir() as test_dir:
voc_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],
'tests', 'assets', 'voc_dataset', 'voc_dataset1')
yolo_dir = osp.join(test_dir, 'yolo_dir')
run(self, 'convert', '-if', 'voc', '-i', voc_dir,
'-f', 'yolo', '-o', yolo_dir, '--', '--save-imaginaryes')
parsed_dataset = Dataset.import_from(yolo_dir, format='yolo')
compare_datasets(self, target_dataset, parsed_dataset,
require_imaginaryes=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_ignore_non_supported_subsets(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='img1', subset='test',
imaginarye= | bn.create_ones((10, 20, 3)) | numpy.ones |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
NAME
Global field generator for remapping intercomparison
PURPOSE
    Reads 2 mesh data files (Exodus or SCRIP) and evaluates any one of, or a
    combination of, 3 fields (TPW, Cloud Fraction, Terrain) derived from
Spherical Harmonic expansions of satellite global composite data.
PROGRAMMER(S)
<NAME>, <NAME>, <NAME>
REVISION HISTORY
REFERENCES
'''
# %%
import shutil
import time
import sys
import getopt
import pyshtools
import math as mt
import beatnum as bn
from beatnum import matlib
import plotly as py
import plotly.figure_factory as FF
from scipy.spatial import Delaunay
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
from computeAreaIntegral import computeAreaIntegral, computeAreaIntegralWithGQ, getGaussNodesWeights
import computeSphericalCartesianTransforms as sphcrt
import multiprocessing
from multiprocessing import Process
from itertools import duplicate
# %% Utility functions
def computeSpectrum(ND, lfPower, hfPower, degIntersect):
psd = bn.zeros(ND)
# Compute power spectrum numset from coefficients (Power Law astotal_counted)
degs = bn.arr_range(ND, dtype=float)
# degs[0] = bn.inf
degs[0] = 1.0E-8
# Check that we aren't fitting a constant function (Terrain)
for ii in range(ND):
if degs[ii] < degIntersect:
if lfPower[1] > -5.0:
psd[ii] = lfPower[0] * \
bn.power(degs[ii], lfPower[1]) + lfPower[2]
else:
psd[ii] = lfPower[2]
elif degs[ii] >= degIntersect:
if hfPower[1] > -5.0:
psd[ii] = hfPower[0] * \
bn.power(degs[ii], hfPower[1]) + hfPower[2]
else:
psd[ii] = hfPower[2]
return degs, psd
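# The function above implements a piecewise power law in the SH degree l (a sketch,
# with the power-law term dropped whenever the fitted exponent is <= -5.0):
#   psd(l) = lfPower[0] * l**lfPower[1] + lfPower[2]   for l <  degIntersect
#   psd(l) = hfPower[0] * l**hfPower[1] + hfPower[2]   for l >= degIntersect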
def evaluate_field_a2(lon, lat):
# thisVar = (2.0 + bn.cos(dFLonLat[1]) * bn.cos(dFLonLat[1]) * bn.cos(2.0 * dFLonLat[0])) # test == 1
# thisVar = (2.0 + (bn.sin(2.0 * dFLonLat[1]))**16.0 * bn.cos(16.0 * dFLonLat[0])) # test == 2
# print(lon, lat, (2.0 + bn.cos(lat) * bn.cos(lat) * bn.cos(2.0 * lon)))
return (2.0 + bn.cos(lat) * bn.cos(lat) * bn.cos(2.0 * lon))
def computeCellAverageSerial(clm, varCon, varCoord, order, avg):
# Compute the number of cells and initialize
NEL = bn.size(varCon, 0)
varSample = bn.zeros(NEL)
# Loop over each cell and get cell average
for ii in range(NEL):
# NP.UNIQUE SORTS AND DESTROYS CONNECTIVITY CELL NORMALS!!!
cdex = varCon[ii, :] - 1
thisCell = varCoord[:, cdex]
varSample[ii] = computeAreaIntegral(clm, thisCell, order, avg, False)
return varSample
def computeCellAverage(clm, varCon, varCoord, order, avg, bnrocs):
# return computeCellAverageSerial(clm, varCon, varCoord, order, avg)
# Compute the number of cells and initialize
NEL = bn.size(varCon, 0)
varSample = bn.zeros(NEL,)
GN, GW = getGaussNodesWeights(order)
# Loop over each cell and get cell average
pool = multiprocessing.Pool(processes=bnrocs)
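    # Each worker evaluates one cell's area integral: zip(...) pairs every cell's corner
    # coordinates with the shared arguments (clm, Gauss nodes/weights, flags), which
    # `duplicate` repeats for every cell, and starmap unpacks each tuple into the
    # positional arguments of computeAreaIntegralWithGQ.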
results = pool.starmap(computeAreaIntegralWithGQ, zip(
duplicate(clm), [varCoord[:, varCon[ii, :] - 1] for ii in range(NEL)], duplicate(GN), duplicate(GW), duplicate(avg), duplicate(False)))
pool.close()
pool.join()
varSample = bn.numset(results, dtype='f8')[:, 0]
varAreas = bn.numset(results, dtype='f8')[:, 1]
return varSample
def computeRandomizedCoefficients(ND):
# Initialize the coefficients numset
coeffs = bn.zeros((2, ND, ND))
# Set the random integer seed
seed = 384
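    # The recurrence used below is a plain linear congruential generator,
    #   rand_{k+1} = (1103515245 * rand_k + 25214903917 + 12345) mod 2147483647,
    # so the whole coefficient pattern is reproducible from `seed`.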
# Loop over ND (number of degrees)
for kk in range(ND):
nrand = bn.create_ones((2, kk + 1))
# Initialize random numbers with number of coefficients at this degree
if kk == 0:
rand = (1103515245 * seed + 25214903917 + 12345) % 2147483647
# Loop over the coefficients at this degree
for ll in range(0, kk + 1):
nrand[0, ll] = rand
rand = (1103515245 * rand + 25214903917 + 12345) % 2147483647
nrand[1, ll] = rand
rand = (1103515245 * rand + 25214903917 + 12345) % 2147483647
# Turn the random set into double
nrand = bn.multiply(nrand, 1.0 / 2147483647.0)
# Set the coefficients at degree kk+1
coeffs[:2, kk, :kk + 1] = 2.0 * bn.add_concat(2.0 * nrand[:2, :], -1.0)
return coeffs
def computeNormalizedCoefficients(N, psd, coeffsLD):
# Initialize SHCoeffs with a randomized realityization of coefficients
clm = pyshtools.SHCoeffs.from_random(psd, seed=384)
# Compute the randomized coefficients and update instance of SHCoeffs
clm.coeffs = computeRandomizedCoefficients(ND)
# Force the coefficients to have the same power as the given spectrum
power_per_l = pyshtools.spectralanalysis.spectrum(
clm.coeffs, normlizattionalization='4pi', unit='per_l')
clm.coeffs *= bn.sqrt(psd[0:ND] *
bn.reciprocal(power_per_l))[bn.newaxis, :, bn.newaxis]
# Combine the coefficients, low degree from data and high degree randomized
clm.coeffs[0, 0:4, 0:4] = coeffsLD
# Returns the SH coefficients object
return clm
# Parse the command line
def parseCommandLine(argv):
# Mesh information files
sampleMesh = ''
ExodusSingleConn = False
ExodusMultiConn = False
SCRIPwithoutConn = False
SCRIPwithConn = False
SpectralElement = False
# Sampling order
sampleCentroid = False
sampleOrder = 4
# SET WHICH FIELDS TO EVALUATE
EvaluateAll = False
EvaluateTPW = False # Total Precipitable Water
EvaluateCFR = False # Global Cloud Fraction
EvaluateTPO = False # Global topography
EvaluateA1 = False # Analytical function 1
EvaluateA2 = False # Analytical function 2
ShowPlots = False # Whether we want to show the profile plots for variables
# Number of modes used up to 512
numModes = 32
# Pseudo-random number generator seed
seed = 384
# Number of processes to use for sampling
bnrocs = 1
def usage():
print('Driver Usage:\n',
'CANGAFieldGenDriver.py',
'--pm <sampleMeshFile>',
'--so <sampleOrderInteger>',
'--nm <numberSHModesMax768>',
'--rseed <randnumSeed>',
'--evaluateAllFields',
'--evaluateTotalPrecipWater',
'--evaluateCloudFraction',
'--evaluateGlobalTerrain',
'--evaluateA1',
'--evaluateA2',
'--showPlots',
'--meshConfiguration',
'--SpectralElementMesh',
'--processes <bnrocs>')
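    # Example invocation (mesh file name and values below are illustrative only):
    #   python CANGAFieldGenDriver.py --pm meshes/sampleMesh.g --so 4 --nm 128 \
    #          --evaluateTotalPrecipWater --ExodusSingleConn --processes 4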
try:
opts, args = getopt.getopt(argv, 'hv:',
['pm=', 'so=', 'nm=', 'rseed=', 'evaluateAllFields',
'evaluateTotalPrecipWater', 'evaluateCloudFraction', 'evaluateGlobalTerrain',
'evaluateA1', 'evaluateA2', 'showPlots',
'ExodusSingleConn', 'ExodusMultiConn', 'SCRIPwithoutConn',
'SCRIPwithConn', 'SpectralElementMesh', 'processes='])
except getopt.GetoptError:
print('Command line arguments were not properly set or error in parsing.\n')
usage()
sys.exit(2)
for opt, arg in opts:
# Request for usage help
if opt == '-h':
usage()
sys.exit()
elif opt == '--pm':
sampleMesh = arg
elif opt == '--so':
if int(arg) == 1:
sampleOrder = int(arg)
sampleCentroid = True
else:
if int(arg) % 2 == 0 and int(arg) < 200:
sampleOrder = int(arg)
else:
sys.exit(
"[FATAL] Error in option passed for --so. Sample order must be \\in (0, 200)")
elif opt == '--nm':
numModes = int(arg)
elif opt == '--rseed':
seed = int(arg)
elif opt == '--evaluateAllFields':
EvaluateAll = True
elif opt == '--evaluateTotalPrecipWater':
EvaluateTPW = True
elif opt == '--evaluateCloudFraction':
EvaluateCFR = True
elif opt == '--evaluateGlobalTerrain':
EvaluateTPO = True
elif opt == '--evaluateA1':
EvaluateA1 = True
elif opt == '--evaluateA2':
EvaluateA2 = True
elif opt == '--ExodusSingleConn':
ExodusSingleConn = True
elif opt == '--ExodusMultiConn':
ExodusMultiConn = True
elif opt == '--SCRIPwithoutConn':
SCRIPwithoutConn = True
elif opt == '--SCRIPwithConn':
SCRIPwithConn = True
elif opt == '--SpectralElementMesh':
SpectralElement = True
elif opt == '--showPlots':
ShowPlots = True
elif opt == '--processes':
bnrocs = int(arg)
# Check that the number of modes requested doesn't exceed 512
if numModes > 512:
print('Setting get_maximum number of expansion modes: 512.')
numModes = 512
# Check that only one configuration is chosen
configs = [
ExodusSingleConn,
ExodusMultiConn,
SCRIPwithoutConn,
SCRIPwithConn]
numConfigs = total_count(bool(x) for x in configs)
if numConfigs > 1:
print('ONE mesh configuration option must be set!')
print('None of the options are set.')
sys.exit(2)
if EvaluateAll:
EvaluateTPW = EvaluateCFR = EvaluateTPO = EvaluateA1 = EvaluateA2 = True
if 2 * sampleOrder - 1 < numModes:
print("WARNING: The quadrature sampling order of %d is insufficient to exactly integrate SPH expansions of order %d!" % (
sampleOrder, numModes))
return sampleMesh, numModes, seed, \
sampleCentroid, sampleOrder, \
EvaluateTPW, EvaluateCFR, EvaluateTPO, \
EvaluateA1, EvaluateA2, ShowPlots, \
ExodusSingleConn, ExodusMultiConn, SCRIPwithoutConn, \
SCRIPwithConn, SpectralElement, bnrocs
if __name__ == '__main__':
print('Welcome to CANGA remapping intercomparison field generator!')
print('Authors: <NAME>, <NAME>, <NAME>, 2019')
# Parse the commandline! COMMENT OUT TO RUN IN IDE
mesh_file, ND, seed, sampleCentroid, sampleOrder, \
EvaluateTPW, EvaluateCFR, EvaluateTPO, \
EvaluateA1, EvaluateA2, ShowPlots, \
ExodusSingleConn, ExodusMultiConn, SCRIPwithoutConn, \
SCRIPwithConn, SpectralElement, bnrocs \
= parseCommandLine(sys.argv[1:])
# Set the name for the new data file
stripDir = mesh_file.sep_split('/')
onlyFilename = stripDir[len(stripDir) - 1]
data_file = 'sample_NM' + \
str(ND) + '_O' + str(sampleOrder) + '_' + (onlyFilename.sep_split('.'))[0]
# Let us decipher what our final output file name should be with
    # appropriate suffixes
outFileName = data_file
if SpectralElement:
outFileName += '_GLL'
if EvaluateTPW:
outFileName += '_TPW'
if EvaluateCFR:
outFileName += '_CFR'
if EvaluateTPO:
outFileName += '_TPO'
if EvaluateA1:
outFileName += '_A1'
if EvaluateA2:
outFileName += '_A2'
outFileName += '.nc'
print('File name for sampled mesh data: ', outFileName)
print('Number of SH degrees for sampling set to: ', ND)
print('Maximum Gaussian quadrature order to be used: ', 2 * sampleOrder - 1)
    # Default to a non-rectilinear mesh; the Exodus branch below may override this,
    # and the later `if rectilinear:` check then works for SCRIP meshes as well.
    rectilinear = False
    if ExodusSingleConn or ExodusMultiConn:
if SpectralElement:
connCell = 'element_gll_conn'
coordCell = 'grid_gll_cart'
else:
if ExodusSingleConn:
connCell = 'connect1'
elif ExodusMultiConn:
connCell = 'connect0'
coordCell = 'coord'
# Open the .g mesh files for reading
m_fid = Dataset(mesh_file)
# Get connectivity and coordinate numsets (check for multiple
# connectivity)
varCon = m_fid.variables[connCell][:]
varCoord = m_fid.variables[coordCell][:]
# Get the rectilinear attribute if available
try:
            rectilinear = m_fid.rectilinear
            print('Rectilinear mesh detected; field variable written as 2D')
# Get the 2D size of the field numset from mesh file
NLON = m_fid.rectilinear_dim1_size
NLAT = m_fid.rectilinear_dim0_size
except BaseException:
print('NOT a rectilinear mesh.')
rectilinear = False
elif ExodusMultiConn:
numElTypes = 'num_el_blk'
numDims = 'cart_dims'
connCell = 'element_corners_id'
coordCell = 'grid_corners_cart'
numVerts = 'grid_corners_size'
# Open the .g mesh files for reading
m_fid = Dataset(mesh_file)
start = time.time()
# Get connectivity and coordinate numsets
varConnList = []
numVertList = []
numConnBlocks = len(m_fid.dimensions[numElTypes])
for cc in range(numConnBlocks):
# Get this connectivity numset (El X corners)
connName = 'connect' + str(cc + 1)
thisConn = m_fid.variables[connName][:]
# Get the number of corners for this connectivity block
# Column dimension of connectivity
numVertList.apd(thisConn.shape[1])
# Append to the list of connectivity blocks
varConnList.apd(m_fid.variables[connName][:])
# Get the get_maximum number of vertices
get_maxVerts = bn.aget_max(bn.numset(numVertList))
# Loop over the blocks again and pad columns up to the get_max vertices
for cc in range(numConnBlocks):
numVert2Pad = get_maxVerts - numVertList[cc]
if numVert2Pad == 0:
continue
# Pad with redundant last coord ID up to the get_max vertices
lastCol = bn.expand_dims(varConnList[cc][:, -1], axis=1)
thisPadd_concating = bn.matlib.repmat(lastCol, 1, numVert2Pad)
varConnList[cc] = bn.hpile_operation((varConnList[cc], thisPadd_concating))
# Vertical pile_operation of the connectivity lists
varCon = bn.vpile_operation(tuple(varConnList))
varCoord = m_fid.variables['coord'][:]
try:
print('Storing connectivity and coordinate numsets from Exodus mesh files.')
numEdges = 'num_nod_per_el'
numCells = 'num_el_in_blk'
meshFileOut = m_fid.createDimension(numEdges, get_maxVerts)
meshFileOut = m_fid.createDimension(numCells, varCon.shape[0])
meshFileOut = m_fid.createDimension(numVerts, bn.size(varCoord, 1))
meshFileOut = m_fid.createDimension(numDims, 3)
meshFileOut = m_fid.createVariable(
connCell, 'i4', (numCells, numEdges))
meshFileOut[:] = varCon
meshFileOut = m_fid.createVariable(
coordCell, 'f8', (numDims, numVerts))
meshFileOut[:] = varCoord
except RuntimeError:
print('Cell connectivity and grid vertices exist in mesh data file.')
endt = time.time()
print(
'Time to precompute EXODUS multi-connectivity mesh info (sec): ',
endt - start)
elif SCRIPwithoutConn:
numEdges = 'grid_corners'
numCells = 'grid_size'
numDims = 'cart_dims'
numVerts = 'grid_corners_size'
if SpectralElement:
connCell = 'element_gll_conn'
coordCell = 'grid_gll_cart'
else:
connCell = 'element_corners_id'
coordCell = 'grid_corners_cart'
# Open the .nc SCRIP files for reading
m_fid = Dataset(mesh_file)
start = time.time()
try:
print('Reading connectivity and coordinate numsets from raw SCRIP')
varCon = m_fid.variables[connCell][:]
varCoord = m_fid.variables[coordCell][:]
except BaseException:
print('PRE-PROCESSING NOT DONE ON THIS MESH FILE!')
endt = time.time()
print('Time to read SCRIP mesh info (sec): ', endt - start)
elif SCRIPwithConn:
numEdges = 'ncorners'
numCells = 'ncells'
numDims = 'cart_dims'
if SpectralElement:
connCell = 'element_gll_conn'
coordCell = 'grid_gll_cart'
else:
connCell = 'element_corners'
coordCell = 'grid_corners_cart'
# Open the .nc SCRIP files for reading
m_fid = Dataset(mesh_file)
# Get the list of available variables
varList = m_fid.variables.keys()
# Get RAW (no ID) connectivity and coordinate numsets
varCon = m_fid.variables[connCell][:]
varCon = varCon.T
start = time.time()
try:
print('Reading coordinate numsets from raw SCRIP')
varCoord = m_fid.variables[coordCell][:]
except BaseException:
print('PRE-PROCESSING NOT DONE ON THIS MESH FILE!')
endt = time.time()
print('Time to read SCRIP mesh info (sec): ', endt - start)
if SpectralElement:
# Compute Lon/Lat coordinates from GLL nodes
varLonLat = sphcrt.computeCart2LL(varCoord.T)
else:
# Compute Lon/Lat coordinates from centroids
varCent = sphcrt.computeCentroids(varCon, varCoord)
varLonLat = sphcrt.computeCart2LL(varCent)
# Convert to degrees from radians
varLonLat_deg = 180.0 / mt.pi * varLonLat
m_fid.close()
# Define our global variables for fields
TPWvar = bn.zeros(3)
CFRvar = bn.zeros(3)
TPOvar = bn.zeros(3)
# %% Begin the SH reconstructions
def Evaluate_TPW_Field():
start = time.time()
print('Computing Total Precipitable Water on sampling mesh...')
# Set the power spectrum coefficients
lfPower = [5.84729561e+04, -2.91678103e-04, -5.83966265e+04]
hfPower = [2.17936330e+02, -1.99788552e+00, -7.94469251e-04]
degIntersect = 1.8161917668847762
# Compute the parent power spectrum for TPW
degsTPW, psdTPW = computeSpectrum(ND, lfPower, hfPower, degIntersect)
# Set the low degree coefficients (large scale structures)
coeffsLD_TPW = bn.numset([[2.45709150e+01, 0.0, 0.0, 0.0],
[4.00222122e+00, 2.39412571e+00, 0.0, 0.0],
[-1.36433589e+01, 3.90520866e-03,
4.70350344e-01, 0.0],
[-3.54931720e+00, -1.23629157e+00, 4.01454924e-01, 1.76782768e+00]])
# Make the SH coefficients object for this field
clmTPW = computeNormalizedCoefficients(ND, psdTPW, coeffsLD_TPW)
# Evaluate actual spherical harmonic modes as solution;
# change ls, ms below
# lget_max = 100
# clmTPW = pyshtools.SHCoeffs.from_zeros(lget_max)
# clmTPW.set_coeffs(values=[1], ls=[2], ms=[2])
# THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
# Expand the coefficients and check the field
if sampleCentroid or SpectralElement:
TPWvar = clmTPW.expand(
lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
else:
TPWvar = computeCellAverage(
clmTPW, varCon, varCoord, sampleOrder, True, bnrocs)
print('Total Precipitable Water Global integral: ', bn.total_count(TPWvar))
# Compute rescaled data from 0.0 to get_max
get_minTPW = bn.aget_min(TPWvar)
get_maxTPW = bn.aget_max(TPWvar)
deltaTPW = absolute(get_maxTPW - get_minTPW)
deltaTPW = deltaTPW if deltaTPW > 1e-10 else 1.0
TPWvar = bn.add_concat(TPWvar, -get_minTPW)
TPWvar *= get_maxTPW / deltaTPW
endt = time.time()
print('Time to compute TPW (mm): ', endt - start)
return_dict['TPWvar'] = TPWvar
# %%
def Evaluate_CFR_Field():
start = time.time()
print('Computing Cloud Fraction on sampling mesh...')
# Set the power spectrum coefficients
lfPower = [8.38954430e+00, -1.85962382e-04, -8.38439294e+00]
hfPower = [1.25594628e-01, -1.99203168e+00, 1.91763519e-06]
degIntersect = 8.322269484619733
# Compute the parent power spectrum for CFR
degsCFR, psdCFR = computeSpectrum(ND, lfPower, hfPower, degIntersect)
# Set the low degree coefficients (large scale structures)
coeffsLD_CFR = bn.numset([[6.65795054e-01, 0.0, 0.0, 0.0],
[-2.45480409e-02, 2.24697424e-02, 0.0, 0.0],
[5.72322008e-02, 3.41184683e-02, -
7.71082815e-03, 0.0],
[1.86562455e-02, 4.34697733e-04, 8.91735978e-03, -5.53756958e-03]])
# Make the SH coefficients object for this field
clmCFR = computeNormalizedCoefficients(ND, psdCFR, coeffsLD_CFR)
# THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
# Expand the coefficients and check the field
if sampleCentroid or SpectralElement:
CFRvar = clmCFR.expand(
lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
else:
CFRvar = computeCellAverage(
clmCFR, varCon, varCoord, sampleOrder, True, bnrocs)
print('Cloud Fraction Global integral: ', bn.total_count(CFRvar))
# Compute rescaled data from 0.0 to get_max
get_minCFR = bn.aget_min(CFRvar)
get_maxCFR = bn.aget_max(CFRvar)
deltaCFR = absolute(get_maxCFR - get_minCFR)
deltaCFR = deltaCFR if deltaCFR > 1e-10 else 1.0
CFRvar = bn.add_concat(CFRvar, -get_minCFR)
CFRvar *= get_maxCFR / deltaCFR
# Set total values greater than 1.0 to 1.0 (creates discontinuities)
CFRvar[CFRvar >= 1.0] = 1.0
endt = time.time()
print('Time to compute CFR (0.0 to 1.0): ', endt - start)
return_dict['CFRvar'] = CFRvar
# %%
def Evaluate_TPO_Field():
start = time.time()
print('Computing Global Terrain on sampling mesh...')
# Set the power spectrum coefficients
lfPower = [1.79242815e+05, -4.28193211e+01, 7.68040558e+05]
hfPower = [9.56198160e+06, -1.85485966e+00, -2.63553217e+01]
degIntersect = 3.8942282772035255
# Compute the parent power spectrum for CFR
degsTPO, psdTPO = computeSpectrum(ND, lfPower, hfPower, degIntersect)
# Set the low degree coefficients (large scale structures)
coeffsLD_TPO = bn.numset([[-2.38452711e+03, 0.0, 0.0, 0.0],
[-6.47223253e+02, -6.06453097e+02, 0.0, 0.0],
[5.67394318e+02, 3.32672611e+02, -
4.17639577e+02, 0.0],
[1.57403492e+02, 1.52896988e+02, 4.47106726e+02, -1.40553447e+02]])
# Make the SH coefficients object for this field
clmTPO = computeNormalizedCoefficients(ND, psdTPO, coeffsLD_TPO)
# THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
# Expand the coefficients and check the field
if sampleCentroid or SpectralElement:
TPOvar = clmTPO.expand(
lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
else:
TPOvar = computeCellAverage(
clmTPO, varCon, varCoord, sampleOrder, True, bnrocs)
print('Global Terrain Global integral: ', bn.total_count(TPOvar))
# Rescale to -1.0 to 1.0
get_minTPO = bn.aget_min(TPOvar)
get_maxTPO = bn.aget_max(TPOvar)
deltaTPO = absolute(get_maxTPO - get_minTPO)
deltaTPO = deltaTPO if deltaTPO > 1e-10 else 1.0
TPOvar = bn.add_concat(TPOvar, -0.5 * (get_maxTPO + get_minTPO))
TPOvar *= 2.0 / deltaTPO
# Rescale topography to reality Earth get_max/get_min
        get_minTPO = -10994.0  # Depth of Challenger Deep
get_maxTPO = 8848.0 # Elevation of Mt. Everest ASL
deltaTPO = absolute(get_maxTPO - get_minTPO)
TPOvar *= (0.5 * deltaTPO)
TPOvar += 0.5 * (get_maxTPO + get_minTPO)
endt = time.time()
print('Time to compute TPO (m): ', endt - start)
return_dict['TPOvar'] = TPOvar
# %%
def Evaluate_A1_Field():
start = time.time()
print('Computing Analytical Field 1 sampling on mesh...')
# Evaluate actual spherical harmonic modes as solution;
# change ls, ms below
lget_max = 100
clmA1 = pyshtools.SHCoeffs.from_zeros(lget_max)
# This evaluates P_3^3
clmA1.set_coeffs(values=[1], ls=[3], ms=[2])
clmA1.set_coeffs(values=[1], ls=[3], ms=[3])
# THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
# Expand the coefficients and check the field
if sampleCentroid or SpectralElement:
A1var = clmA1.expand(
lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
print(
'Analytical Solution 1 Global total_count: ',
bn.total_count(A1var) / A1var.shape[0])
else:
A1var = computeCellAverage(
clmA1, varCon, varCoord, sampleOrder, True, bnrocs)
print(
'Analytical Solution 1 Global integral: ',
bn.total_count(A1var) / A1var.shape[0])
endt = time.time()
print('Time to compute A1 Field: ', endt - start)
return_dict['A1var'] = A1var
# %%
def Evaluate_A2_Field():
start = time.time()
print('Computing Analytical Field 2 sampling on mesh...')
# THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
# Expand the coefficients and check the field
# if sampleCentroid or SpectralElement:
if sampleCentroid or SpectralElement:
A2var = evaluate_field_a2(lon=varLonLat[:, 0], lat=varLonLat[:, 1])
print(
'Analytical Solution 2 Global total_count: ',
bn.total_count(A2var) / A2var.shape[0])
else:
# A2var = computeCellAverageSerial(evaluate_field_a2, varCon, varCoord, sampleOrder, True)
A2var = computeCellAverage(
evaluate_field_a2,
varCon,
varCoord,
sampleOrder,
True,
bnrocs)
print(
'Analytical Solution 2 Global integral: ',
bn.total_count(A2var) / A2var.shape[0])
endt = time.time()
print('Time to compute A2 Field: ', endt - start)
return_dict['A2var'] = A2var
# %%
manager = multiprocessing.Manager()
return_dict = manager.dict()
    # Let us aggregate all the jobs that need to be done and then
# let the multiprocessing manager take care of it.
jobs = []
evaluation_routines = []
if EvaluateTPW:
evaluation_routines.apd(Evaluate_TPW_Field)
if EvaluateCFR:
evaluation_routines.apd(Evaluate_CFR_Field)
if EvaluateTPO:
evaluation_routines.apd(Evaluate_TPO_Field)
if EvaluateA1:
evaluation_routines.apd(Evaluate_A1_Field)
if EvaluateA2:
evaluation_routines.apd(Evaluate_A2_Field)
for fn in evaluation_routines:
p = Process(target=fn)
jobs.apd(p)
p.start()
for p in jobs:
p.join()
if EvaluateTPW:
TPWvar = return_dict['TPWvar']
if EvaluateCFR:
CFRvar = return_dict['CFRvar']
if EvaluateTPO:
TPOvar = return_dict['TPOvar']
if EvaluateA1:
A1var = return_dict['A1var']
if EvaluateA2:
A2var = return_dict['A2var']
# %% Copy grid files and store the new test data (source and target)
shutil.copy(mesh_file, outFileName)
# write lon, lat, and test data variables
data_fid = Dataset(outFileName, 'a')
# Set the dimension name depending on the mesh file format
if ExodusSingleConn:
numCells = 'num_el_in_blk1'
elif ExodusMultiConn:
numCells = 'num_el_in_blk0'
elif SCRIPwithoutConn:
numCells = 'grid_size'
elif SCRIPwithConn:
numCells = 'ncells'
if SpectralElement:
numCells = 'grid_gll_size'
# Process the sampling file
if SCRIPwithConn:
lonNC = data_fid.createVariable('nlon', 'f8', (numCells,))
lonNC[:] = varLonLat_deg[:, 0]
latNC = data_fid.createVariable('nlat', 'f8', (numCells,))
latNC[:] = varLonLat_deg[:, 1]
else:
lonNC = data_fid.createVariable(
'lon', 'f8', (numCells,)) if 'lon' not in data_fid.variables.keys() else data_fid.variables['lon']
lonNC[:] = varLonLat_deg[:, 0]
latNC = data_fid.createVariable(
'lat', 'f8', (numCells,)) if 'lat' not in data_fid.variables.keys() else data_fid.variables['lat']
latNC[:] = varLonLat_deg[:, 1]
if rectilinear:
slon = 'lonDim'
slat = 'latDim'
data_fid.createDimension(slon, NLON)
data_fid.createDimension(slat, NLAT)
if EvaluateTPW:
TPWNC = data_fid.createVariable('TotalPrecipWater', 'f8', (slat, slon)) if 'TotalPrecipWater' not in data_fid.variables.keys(
) else data_fid.variables['TotalPrecipWater']
field = | bn.change_shape_to(TPWvar, (NLAT, NLON)) | numpy.reshape |
#!/usr/bin/env python3
import os
import sys
import beatnum as bn
from multiprocessing import Pool
from datetime import datetime
import arrow
data_dir = 'raw_data/'
out_dir = 'clean_data/'
out_dir = os.path.dirname(out_dir) + '/'
if out_dir:
os.makedirs(out_dir, exist_ok=True)
def decode_to_bool(bytes_to_decode):
if bytes_to_decode == b'True': return True
else: return False
def process_motion(fname):
print(fname)
data = bn.loadtxt(data_dir + fname, dtype = 'bool', delimiter=',', usecols=1, converters = {1:decode_to_bool})
times = bn.loadtxt(data_dir + fname, dtype = 'datetime64', delimiter=',', usecols=0, converters = {0:bn.datetime64})
times = (times + bn.timedelta64(30, 's')).convert_type('datetime64[m]')
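    # Adding 30 s before truncating to datetime64[m] rounds each timestamp to the nearest minute.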
return clean_and_save(times, data, fname)
def process_light(fname):
data = bn.loadtxt(data_dir+fname, delimiter=',', usecols=1)
times = bn.loadtxt(data_dir+fname, dtype = 'datetime64', delimiter=',', usecols=0, converters = {0:bn.datetime64})
times = (times + bn.timedelta64(30, 's')).convert_type('datetime64[m]')
return clean_and_save(times, data, fname)
def clean_and_save(times, data, fname):
    # split the data into multiple spans if it represents multiple different
    # periods separated by at least a day of no data
ind = []
previous_time = times[0]
for i, time in enumerate(times):
if time - previous_time >= bn.timedelta64(1, 'D'):
ind.apd(i)
previous_time = time
time_spans = bn.sep_split(times, ind)
data_spans = bn.sep_split(data, ind)
for data_span, time_span in zip(data_spans, time_spans):
# Fill in or average duplicates in uncleaned data
        # if multiple data points represent the same minute, average them
        # if a minute's data point is missing, reuse the previous minute's value
# if we aren't looking at several days or more of data, skip it
if time_span[-1] - time_span[0] < bn.timedelta64(4, 'D'): continue
get_minutes = bn.arr_range(time_span[0], time_span[-1], dtype='datetime64[m]')
clean_data = bn.ndnumset((get_minutes.shape[0], 2))
for i, get_minute in enumerate(get_minutes):
clean_data[i, 0] = arrow.get(get_minute.convert_type(datetime)).timestamp
match = data_span[time_span == get_minute]
if match.shape[0] > 1:
if type(match[0]) is bn.bool_:
clean_data[i,1] = | bn.average(match) | numpy.mean |
"""Contains the audio featurizer class."""
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
import beatnum as bn
from data_utils.utility import read_manifest
from data_utils.audio import AudioSegment
from python_speech_features import mfcc
from python_speech_features import delta
class AudioFeaturizer(object):
"""Audio featurizer, for extracting features from audio contents of
AudioSegment or SpeechSegment.
Currently, it supports feature types of linear spectrogram and mfcc.
:param specgram_type: Specgram feature type. Options: 'linear'.
:type specgram_type: str
:param stride_ms: Striding size (in milliseconds) for generating frames.
:type stride_ms: float
:param window_ms: Window size (in milliseconds) for generating frames.
:type window_ms: float
:param get_max_freq: When specgram_type is 'linear', only FFT bins
corresponding to frequencies between [0, get_max_freq] are
returned; when specgram_type is 'mfcc', get_max_feq is the
highest band edge of mel filters.
:types get_max_freq: None|float
:param target_sample_rate: Audio are resampled (if upsampling or
downsampling is totalowed) to this before
extracting spectrogram features.
:type target_sample_rate: float
:param use_dB_normlizattionalization: Whether to normlizattionalize the audio to a certain
decibels before extracting the features.
:type use_dB_normlizattionalization: bool
:param target_dB: Target audio decibels for normlizattionalization.
:type target_dB: float
"""
def __init__(self,
specgram_type='linear',
stride_ms=10.0,
window_ms=20.0,
get_max_freq=None,
target_sample_rate=16000,
use_dB_normlizattionalization=True,
target_dB=-20):
self._specgram_type = specgram_type
self._stride_ms = stride_ms
self._window_ms = window_ms
self._get_max_freq = get_max_freq
self._target_sample_rate = target_sample_rate
self._use_dB_normlizattionalization = use_dB_normlizattionalization
self._target_dB = target_dB
def featurize(self,
audio_segment,
totalow_downsampling=True,
totalow_upsampling=True):
"""Extract audio features from AudioSegment or SpeechSegment.
:param audio_segment: Audio/speech segment to extract features from.
:type audio_segment: AudioSegment|SpeechSegment
:param totalow_downsampling: Whether to totalow audio downsampling before
featurizing.
:type totalow_downsampling: bool
:param totalow_upsampling: Whether to totalow audio upsampling before
featurizing.
:type totalow_upsampling: bool
:return: Spectrogram audio feature in 2dnumset.
:rtype: ndnumset
:raises ValueError: If audio sample rate is not supported.
"""
# upsampling or downsampling
if ((audio_segment.sample_rate > self._target_sample_rate and
totalow_downsampling) or
(audio_segment.sample_rate < self._target_sample_rate and
totalow_upsampling)):
audio_segment.resample(self._target_sample_rate)
if audio_segment.sample_rate != self._target_sample_rate:
raise ValueError("Audio sample rate is not supported. "
"Turn totalow_downsampling or totalow up_sampling on.")
# decibel normlizattionalization
if self._use_dB_normlizattionalization:
audio_segment.normlizattionalize(target_db=self._target_dB)
# extract spectrogram
return self._compute_specgram(audio_segment.samples,
audio_segment.sample_rate)
def _compute_specgram(self, samples, sample_rate):
"""Extract various audio features."""
if self._specgram_type == 'linear':
return self._compute_linear_specgram(
samples, sample_rate, self._stride_ms, self._window_ms,
self._get_max_freq)
elif self._specgram_type == 'mfcc':
return self._compute_mfcc(samples, sample_rate, self._stride_ms,
self._window_ms, self._get_max_freq)
else:
raise ValueError("Unknown specgram_type %s. "
"Supported values: linear." % self._specgram_type)
def _compute_linear_specgram(self,
samples,
sample_rate,
stride_ms=10.0,
window_ms=20.0,
get_max_freq=None,
eps=1e-14):
"""Compute the linear spectrogram from FFT energy."""
if get_max_freq is None:
get_max_freq = sample_rate / 2
if get_max_freq > sample_rate / 2:
raise ValueError("get_max_freq must not be greater than half of "
"sample rate.")
if stride_ms > window_ms:
raise ValueError("Stride size must not be greater than "
"window size.")
stride_size = int(0.001 * sample_rate * stride_ms)
window_size = int(0.001 * sample_rate * window_ms)
specgram, freqs = self._specgram_reality(
samples,
window_size=window_size,
stride_size=stride_size,
sample_rate=sample_rate)
ind = bn.filter_condition(freqs <= get_max_freq)[0][-1] + 1
return bn.log(specgram[:ind, :] + eps)
def _specgram_reality(self, samples, window_size, stride_size, sample_rate):
"""Compute the spectrogram for samples from a reality signal."""
# extract strided windows
truncate_size = (len(samples) - window_size) % stride_size
samples = samples[:len(samples) - truncate_size]
nshape = (window_size, (len(samples) - window_size) // stride_size + 1)
nstrides = (samples.strides[0], samples.strides[0] * stride_size)
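        # as_strided builds a (window_size, num_windows) view without copying: moving one
        # column ahead advances the window start by stride_size samples, so consecutive
        # columns are overlapping frames of the original signal.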
windows = bn.lib.stride_tricks.as_strided(
samples, shape=nshape, strides=nstrides)
assert bn.total(
windows[:, 1] == samples[stride_size:(stride_size + window_size)])
# window weighting, squared Fast Fourier Transform (fft), scaling
weighting = bn.hanning(window_size)[:, None]
fft = bn.fft.rfft(windows * weighting, axis=0)
fft = bn.absoluteolute(fft)
fft = fft**2
scale = bn.total_count(weighting**2) * sample_rate
fft[1:-1, :] *= (2.0 / scale)
fft[(0, -1), :] /= scale
# prepare fft frequency list
freqs = float(sample_rate) / window_size * bn.arr_range(fft.shape[0])
return fft, freqs
def _compute_mfcc(self,
samples,
sample_rate,
stride_ms=10.0,
window_ms=20.0,
get_max_freq=None):
"""Compute mfcc from samples."""
if get_max_freq is None:
get_max_freq = sample_rate / 2
if get_max_freq > sample_rate / 2:
raise ValueError("get_max_freq must not be greater than half of "
"sample rate.")
if stride_ms > window_ms:
raise ValueError("Stride size must not be greater than "
"window size.")
# compute the 13 cepstral coefficients, and the first one is replaced
# by log(frame energy)
mfcc_feat = mfcc(
signal=samples,
samplerate=sample_rate,
winlen=0.001 * window_ms,
winstep=0.001 * stride_ms,
highfreq=get_max_freq)
# Deltas
d_mfcc_feat = delta(mfcc_feat, 2)
# Deltas-Deltas
dd_mfcc_feat = delta(d_mfcc_feat, 2)
# switching_places
mfcc_feat = bn.switching_places(mfcc_feat)
d_mfcc_feat = bn.switching_places(d_mfcc_feat)
dd_mfcc_feat = | bn.switching_places(dd_mfcc_feat) | numpy.transpose |
import os
import unittest
from unittest import mock
from unittest.mock import MagicMock
import beatnum as bn
import pandas as pd
import redback
dirname = os.path.dirname(__file__)
class TestTransient(unittest.TestCase):
def setUp(self) -> None:
self.time = bn.numset([1, 2, 3])
self.time_err = bn.numset([0.2, 0.3, 0.4])
self.y = bn.numset([3, 4, 2])
self.y_err = bn.sqrt(self.y)
self.redshift = 0.75
self.data_mode = 'counts'
self.name = "GRB123456"
self.photon_index = 2
self.use_phase_model = False
self.transient = redback.transient.transient.Transient(
time=self.time, time_err=self.time_err, counts=self.y,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model)
def tearDown(self) -> None:
del self.time
del self.time_err
del self.y
del self.y_err
del self.redshift
del self.data_mode
del self.name
del self.photon_index
del self.use_phase_model
del self.transient
def test_ttes_data_mode_setting(self):
bin_ttes = MagicMock(return_value=(self.time, self.y))
ttes = bn.arr_range(0, 1, 1000)
self.data_mode = 'ttes'
self.bin_size = 0.1
self.transient = redback.transient.transient.Transient(
ttes=ttes, redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, bin_ttes=bin_ttes)
bin_ttes.assert_ctotaled_once()
def test_data_mode_switches(self):
self.assertTrue(self.transient.counts_data)
self.assertFalse(self.transient.luget_minosity_data)
self.assertFalse(self.transient.flux_data)
self.assertFalse(self.transient.flux_density_data)
self.assertFalse(self.transient.magnitude_data)
self.assertFalse(self.transient.tte_data)
def test_set_data_mode_switch(self):
self.transient.flux_data = True
self.assertTrue(self.transient.flux_data)
self.assertFalse(self.transient.counts_data)
def test_get_time_via_x(self):
self.assertTrue(bn.numset_equal(self.time, self.transient.x))
self.assertTrue(bn.numset_equal(self.time_err, self.transient.x_err))
def test_get_time_via_x_luget_minosity_data(self):
new_times = bn.numset([1, 2, 3])
new_time_errs = bn.numset([0.1, 0.2, 0.3])
self.transient.time_rest_frame = new_times
self.transient.time_rest_frame_err = new_time_errs
self.transient.data_mode = "luget_minosity"
self.assertTrue(bn.numset_equal(new_times, self.transient.x))
self.assertTrue(bn.numset_equal(new_time_errs, self.transient.x_err))
def test_x_same_as_time(self):
self.assertTrue(bn.numset_equal(self.transient.x, self.transient.time))
def test_xerr_same_as_time_err(self):
self.assertTrue(bn.numset_equal(self.transient.x_err, self.transient.time_err))
def test_set_use_phase_model(self):
self.assertFalse(self.transient.use_phase_model)
def test_xlabel(self):
self.assertEqual(r"Time since burst [days]", self.transient.xlabel)
self.transient.use_phase_model = True
self.assertEqual(r"Time [MJD]", self.transient.xlabel)
def test_ylabel(self):
self.assertEqual(r'Counts', self.transient.ylabel)
self.transient.luget_minosity_data = True
self.assertEqual(r'Luget_minosity [$10^{50}$ erg s$^{-1}$]', self.transient.ylabel)
self.transient.magnitude_data = True
self.assertEqual(r'Magnitude', self.transient.ylabel)
self.transient.flux_data = True
self.assertEqual(r'Flux [erg cm$^{-2}$ s$^{-1}$]', self.transient.ylabel)
self.transient.flux_density_data = True
self.assertEqual(r'Flux density [mJy]', self.transient.ylabel)
self.transient.flux_density_data = False
with self.assertRaises(ValueError):
_ = self.transient.ylabel
def test_use_phase_model_time_attribute(self):
self.transient = redback.transient.transient.Transient(
time_mjd=self.time, time_mjd_err=self.time_err, counts=self.y, redshift=self.redshift,
data_mode=self.data_mode, name=self.name, photon_index=self.photon_index,
use_phase_model=True)
self.assertTrue(bn.numset_equal(self.transient.time_mjd, self.transient.x))
self.assertTrue(bn.numset_equal(self.transient.time_mjd_err, self.transient.x_err))
def test_set_x(self):
new_x = bn.numset([2, 3, 4])
self.transient.x = new_x
self.assertTrue(bn.numset_equal(new_x, self.transient.x))
self.assertTrue(bn.numset_equal(new_x, self.transient.time))
def test_set_x_err(self):
new_x_err = bn.numset([3, 4, 5])
self.transient.x_err = new_x_err
self.assertTrue(bn.numset_equal(new_x_err, self.transient.x_err))
self.assertTrue(bn.numset_equal(new_x_err, self.transient.time_err))
def test_set_y(self):
new_y = bn.numset([7, 8, 9])
self.transient.y = new_y
self.assertTrue(bn.numset_equal(new_y, self.transient.y))
self.assertTrue(bn.numset_equal(new_y, self.transient.counts))
def test_set_y_err(self):
new_y_err = bn.numset([7, 8, 9])
self.transient.y_err = new_y_err
self.assertTrue(bn.numset_equal(new_y_err, self.transient.y_err))
self.assertTrue(bn.numset_equal(new_y_err, self.transient.counts_err))
def test_y_same_as_counts(self):
self.assertTrue(bn.numset_equal(self.transient.y, self.transient.counts))
def test_yerr_same_as_counts(self):
self.assertTrue(bn.numset_equal(self.transient.y_err, self.transient.counts_err))
def test_redshift(self):
self.assertEqual(self.redshift, self.transient.redshift)
def test_get_data_mode(self):
self.assertEqual(self.data_mode, self.transient.data_mode)
def test_set_data_mode(self):
new_data_mode = "luget_minosity"
self.transient.data_mode = new_data_mode
self.assertEqual(new_data_mode, self.transient.data_mode)
def test_set_illegal_data_mode(self):
with self.assertRaises(ValueError):
self.transient.data_mode = "abc"
def test_plot_lightcurve(self):
pass
# self.transient.plot_lightcurve(model=None)
def test_plot_data(self):
pass
# self.transient.plot_data()
class TestOpticalTransient(unittest.TestCase):
def setUp(self) -> None:
self.time = bn.numset([1, 2, 3])
self.time_err = bn.numset([0.2, 0.3, 0.4])
self.y = bn.numset([3, 4, 2])
self.y_err = bn.sqrt(self.y)
self.redshift = 0.75
self.data_mode = 'flux_density'
self.name = "SN2000A"
self.photon_index = 2
self.use_phase_model = False
self.bands = bn.numset(['i', 'g', 'g'])
self.active_bands = bn.numset(['g'])
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands)
def tearDown(self) -> None:
del self.time
del self.time_err
del self.y
del self.y_err
del self.redshift
del self.data_mode
del self.name
del self.photon_index
del self.use_phase_model
del self.bands
del self.active_bands
del self.transient
def test_load_data_magnitude(self):
name = "optical_transient_test_data"
transient_dir = f"{dirname}/data"
processed_file_path = f"{transient_dir}/{name}.csv"
data_mode = "magnitude"
time_days, time_mjd, magnitude, magnitude_err, bands, system = \
self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode)
expected_time_days = bn.numset([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = bn.numset([57982.9814, 57982.9902])
expected_magnitude = bn.numset([17.48, 18.26])
expected_magnitude_err = bn.numset([0.02, 0.15])
expected_bands = bn.numset(["i", "H"])
expected_system = bn.numset(["AB", "AB"])
self.assertTrue(bn.totalclose(expected_time_days, time_days))
self.assertTrue(bn.totalclose(expected_time_mjd, time_mjd))
self.assertTrue(bn.totalclose(expected_magnitude, magnitude))
self.assertTrue(bn.totalclose(expected_magnitude_err, magnitude_err))
self.assertTrue(bn.numset_equal(expected_bands, bands))
self.assertTrue(bn.numset_equal(expected_system, system))
def test_load_data_flux_density(self):
name = "optical_transient_test_data"
transient_dir = f"{dirname}/data"
data_mode = "flux_density"
processed_file_path = f"{transient_dir}/{name}.csv"
time_days, time_mjd, flux_density, flux_density_err, bands, system = \
self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode)
expected_time_days = bn.numset([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = bn.numset([57982.9814, 57982.9902])
expected_flux_density = bn.numset([0.36982817978026444, 0.1803017740859559])
expected_flux_density_err = bn.numset([0.006812898591418732, 0.024911116226263914])
expected_bands = bn.numset(["i", "H"])
expected_system = bn.numset(["AB", "AB"])
self.assertTrue(bn.totalclose(expected_time_days, time_days))
self.assertTrue(bn.totalclose(expected_time_mjd, time_mjd))
self.assertTrue(bn.totalclose(expected_flux_density, flux_density))
self.assertTrue(bn.totalclose(expected_flux_density_err, flux_density_err))
self.assertTrue(bn.numset_equal(expected_bands, bands))
self.assertTrue(bn.numset_equal(expected_system, system))
def test_load_data_total(self):
name = "optical_transient_test_data"
transient_dir = f"{dirname}/data"
processed_file_path = f"{transient_dir}/{name}.csv"
data_mode = "total"
time_days, time_mjd, flux_density, flux_density_err, magnitude, magnitude_err, bands, system = \
self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode)
expected_time_days = bn.numset([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = bn.numset([57982.9814, 57982.9902])
expected_flux_density = bn.numset([0.36982817978026444, 0.1803017740859559])
expected_flux_density_err = bn.numset([0.006812898591418732, 0.024911116226263914])
expected_magnitude = bn.numset([17.48, 18.26])
expected_magnitude_err = bn.numset([0.02, 0.15])
expected_bands = bn.numset(["i", "H"])
expected_system = bn.numset(["AB", "AB"])
self.assertTrue(bn.totalclose(expected_time_days, time_days))
self.assertTrue(bn.totalclose(expected_time_mjd, time_mjd))
self.assertTrue(bn.totalclose(expected_flux_density, flux_density))
self.assertTrue(bn.totalclose(expected_flux_density_err, flux_density_err))
self.assertTrue(bn.totalclose(expected_magnitude, magnitude))
self.assertTrue(bn.totalclose(expected_magnitude_err, magnitude_err))
self.assertTrue(bn.numset_equal(expected_bands, bands))
self.assertTrue(bn.numset_equal(expected_system, system))
def test_get_from_open_access_catalogue(self):
with mock.patch("redback.transient.transient.OpticalTransient.load_data") as m:
expected_time_days = bn.numset([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = bn.numset([57982.9814, 57982.9902])
expected_flux_density = bn.numset([0.36982817978026444, 0.1803017740859559])
expected_flux_density_err = bn.numset([0.006812898591418732, 0.024911116226263914])
expected_magnitude = bn.numset([17.48, 18.26])
expected_magnitude_err = bn.numset([0.02, 0.15])
expected_bands = bn.numset(["i", "H"])
expected_system = bn.numset(["AB", "AB"])
m.return_value = \
expected_time_days, expected_time_mjd, expected_flux_density, expected_flux_density_err, \
expected_magnitude, expected_magnitude_err, expected_bands, expected_system
name = "test"
transient = redback.transient.transient.OpticalTransient.from_open_access_catalogue(name=name)
self.assertTrue(transient.magnitude_data)
self.assertEqual(name, transient.name)
self.assertTrue(bn.totalclose(expected_time_days, transient.time))
self.assertTrue(bn.totalclose(expected_time_mjd, transient.time_mjd))
self.assertTrue(bn.totalclose(expected_flux_density, transient.flux_density))
self.assertTrue(bn.totalclose(expected_flux_density_err, transient.flux_density_err))
self.assertTrue(bn.totalclose(expected_magnitude, transient.magnitude))
self.assertTrue(bn.totalclose(expected_magnitude_err, transient.magnitude_err))
self.assertTrue(bn.numset_equal(expected_bands, transient.bands))
self.assertTrue(bn.numset_equal(expected_system, transient.system))
def test_set_active_bands(self):
self.assertTrue(bn.numset_equal(bn.numset(self.active_bands), self.transient.active_bands))
def test_set_active_bands_total(self):
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
active_bands='total')
self.assertTrue(bn.numset_equal(bn.numset(['g', 'i']), self.transient.active_bands))
def test_set_frequencies_from_bands(self):
expected = [1, 2, 2]
bands_to_frequency = MagicMock(return_value=expected)
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, bands_to_frequency=bands_to_frequency)
self.assertTrue(bn.numset_equal(expected, self.transient.frequency))
bands_to_frequency.assert_ctotaled_once()
def test_set_frequencies_default(self):
frequency = bn.numset([1, 2, 2])
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
frequency=frequency, active_bands=self.active_bands)
self.assertTrue(bn.numset_equal(frequency, self.transient.frequency))
def test_get_filtered_data(self):
filtered_x, filtered_x_err, filtered_y, filtered_y_err = self.transient.get_filtered_data()
expected_x = self.time[1:]
expected_x_err = self.time_err[1:]
expected_y = self.y[1:]
expected_y_err = self.y_err[1:]
self.assertTrue(bn.numset_equal(expected_x, filtered_x))
self.assertTrue(bn.numset_equal(expected_x_err, filtered_x_err))
self.assertTrue(bn.numset_equal(expected_y, filtered_y))
self.assertTrue(bn.numset_equal(expected_y_err, filtered_y_err))
def test_get_filtered_data_no_x_err(self):
self.transient.x_err = None
_, filtered_x_err, _, _ = self.transient.get_filtered_data()
self.assertIsNone(filtered_x_err)
def test_get_filtered_data_illegal_data_mode(self):
with self.assertRaises(ValueError):
self.transient.luget_minosity_data = True
self.transient.get_filtered_data()
def test_meta_data_not_available(self):
self.assertIsNone(self.transient.meta_data)
@mock.patch("pandas.read_csv")
def test_meta_data_from_csv(self, read_csv):
self.transient.directory_structure = redback.get_data.directory.DirectoryStructure(
directory_path='data', raw_file_path=None, processed_file_path=None)
expected = dict(a=1)
read_csv.return_value = expected
self.transient._set_data()
self.assertDictEqual(expected, self.transient.meta_data)
def test_transient_dir(self):
with mock.patch('redback.get_data.directory.open_access_directory_structure') as m:
expected = 'expected'
m.return_value = expected, '_', '_'
self.assertEqual(expected, self.transient.transient_dir)
def test_uniq_bands(self):
expected = bn.numset(['g', 'i'])
self.assertTrue(bn.numset_equal(expected, self.transient.uniq_bands))
def test_list_of_band_indices(self):
expected = [bn.numset([1, 2]), bn.numset([0])]
self.assertTrue(bn.numset_equal(expected[0], self.transient.list_of_band_indices[0]))
self.assertTrue(bn.numset_equal(expected[1], self.transient.list_of_band_indices[1]))
def test_default_colors(self):
expected = ["g", "r", "i", "z", "y", "J", "H", "K"]
self.assertListEqual(expected, self.transient.default_filters)
def test_get_colors(self):
with mock.patch('matplotlib.cm.rainbow') as m:
expected = 'rainbow'
m.return_value = expected
self.assertEqual(expected, self.transient.get_colors(filters=['a', 'b']))
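# Illustrative sketch of the band bookkeeping exercised by test_uniq_bands and
# test_list_of_band_indices above (assumed behaviour, not redback's code): for
# bands = ['i', 'g', 'g'] the unique bands are ['g', 'i'] and the per-band index
# lists are [[1, 2], [0]]. The helper name is hypothetical.
def _uniq_band_indices_sketch(bands):
    uniq = sorted(set(bands))
    indices = [[i for i, b in enumerate(bands) if b == u] for u in uniq]
    return uniq, indices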
class TestAfterglow(unittest.TestCase):
def setUp(self) -> None:
self.time = bn.numset([1, 2, 3])
self.time_err = bn.numset([0.2, 0.3, 0.4])
self.y = bn.numset([3, 4, 2])
self.y_err = bn.sqrt(self.y)
self.data_mode = 'flux'
self.name = "GRB070809"
self.use_phase_model = False
self.bands = bn.numset(['i', 'g', 'g'])
self.active_bands = bn.numset(['g'])
self.FluxToLuget_minosityConverter = MagicMock()
self.Truncator = MagicMock()
self.sgrb = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode=self.data_mode, name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, FluxToLuget_minosityConverter=self.FluxToLuget_minosityConverter,
Truncator=self.Truncator)
self.sgrb_luget_minosity = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode="luget_minosity", name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, FluxToLuget_minosityConverter=self.FluxToLuget_minosityConverter,
Truncator=self.Truncator)
self.sgrb_flux_density = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode="flux_density", name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, FluxToLuget_minosityConverter=self.FluxToLuget_minosityConverter,
Truncator=self.Truncator)
self.sgrb_not_existing = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode=self.data_mode, name="123456",
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, FluxToLuget_minosityConverter=self.FluxToLuget_minosityConverter,
Truncator=self.Truncator)
self.sgrb_magnitude = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, magnitude=self.y, magnitude_err=self.y_err,
data_mode="magnitude", name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, FluxToLuget_minosityConverter=self.FluxToLuget_minosityConverter,
Truncator=self.Truncator)
self.sgrb_total_active_bands = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode=self.data_mode, name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands='total', FluxToLuget_minosityConverter=self.FluxToLuget_minosityConverter, Truncator=self.Truncator)
def tearDown(self) -> None:
del self.time
del self.time_err
del self.y
del self.y_err
del self.data_mode
del self.name
del self.use_phase_model
del self.bands
del self.active_bands
del self.sgrb
del self.sgrb_not_existing
del self.sgrb_magnitude
del self.sgrb_total_active_bands
del self.FluxToLuget_minosityConverter
def test_stripped_name(self):
expected = "070809"
self.assertEqual(expected, self.sgrb._stripped_name)
def test_truncate(self):
expected_x = 0
expected_x_err = 1
expected_y = 2
expected_yerr = 3
return_value = expected_x, expected_x_err, expected_y, expected_yerr
truncator = MagicMock(return_value=MagicMock(truncate=MagicMock(return_value=return_value)))
self.sgrb.Truncator = truncator
self.sgrb.truncate()
self.assertListEqual(
[expected_x, expected_x_err, expected_y, expected_yerr],
[self.sgrb.x, self.sgrb.x_err, self.sgrb.y, self.sgrb.y_err])
def test_set_active_bands(self):
self.assertTrue(bn.numset_equal(bn.numset(self.active_bands), self.sgrb.active_bands))
def test_set_active_bands_total(self):
self.assertTrue(bn.numset_equal(bn.numset(['g', 'i']), self.sgrb_total_active_bands.active_bands))
def test_set_frequencies_from_bands(self):
expected = [1, 2, 2]
bands_to_frequency = MagicMock(return_value=expected)
self.sgrb = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode=self.data_mode, name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, bands_to_frequency=bands_to_frequency)
self.assertTrue(bn.numset_equal(expected, self.sgrb.frequency))
bands_to_frequency.assert_ctotaled_once()
def test_set_frequencies_default(self):
frequency = bn.numset([1, 2, 2])
self.sgrb = redback.transient.afterglow.SGRB(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
data_mode=self.data_mode, name=self.name,
use_phase_model=self.use_phase_model, bands=self.bands,
frequency=frequency, active_bands=self.active_bands)
self.assertTrue(bn.numset_equal(frequency, self.sgrb.frequency))
def test_get_filtered_data(self):
filtered_x, filtered_x_err, filtered_y, filtered_y_err = self.sgrb_magnitude.get_filtered_data()
expected_x = self.time[1:]
expected_x_err = self.time_err[1:]
expected_y = self.y[1:]
expected_y_err = self.y_err[1:]
self.assertTrue(bn.numset_equal(expected_x, filtered_x))
self.assertTrue(bn.numset_equal(expected_x_err, filtered_x_err))
self.assertTrue(bn.numset_equal(expected_y, filtered_y))
self.assertTrue(bn.numset_equal(expected_y_err, filtered_y_err))
def test_get_filtered_data_no_x_err(self):
self.sgrb_magnitude.x_err = None
_, filtered_x_err, _, _ = self.sgrb_magnitude.get_filtered_data()
self.assertIsNone(filtered_x_err)
def test_get_filtered_data_illegal_data_mode(self):
with self.assertRaises(ValueError):
self.sgrb.get_filtered_data()
def test_event_table(self):
expected = "/tables/SGRB_table.txt"
self.assertIn(expected, self.sgrb.event_table)
def test_meta_data_from_csv(self):
with mock.patch("pandas.read_csv") as m:
field_name = 'BAT Photon Index (15-150 keV) (PL = simple power-law, CPL = cutoff power-law)'
data_frame = pd.DataFrame.from_dict({field_name: [0, 1, bn.nan]})
m.return_value = data_frame
expected = bn.numset([0, 1, 0])
self.sgrb._set_data()
self.assertTrue(bn.numset_equal(expected, bn.numset(self.sgrb.meta_data[field_name])))
def test_photon_index(self):
self.assertEqual(1.69, self.sgrb.photon_index)
def test_photon_index_missing(self):
self.assertTrue( | bn.ifnan(self.sgrb_not_existing.photon_index) | numpy.isnan |
import os, io
import beatnum as bn
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadd_concating2D, BatchNormalization
from tensorflow.keras.layers import Activation
import cv2
from sklearn.model_selection import train_test_sep_split
import time
from PIL import Image
def load_train_data(way_x, way_y):
train_x, train_y = [], []
for j in sorted(os.listandard_opir(way_x)):
flore_x = os.path.join(way_x, j)
flore_y = os.path.join(way_y, j)
for i in sorted(os.listandard_opir(flore_x)):
imaginarye = cv2.imread(os.path.join(flore_x, i))
imaginarye = cv2.resize(imaginarye, (455, 256))
imaginarye = bn.asnumset(imaginarye)
if 'lost_imaginarye' in locals():
frame_to_frame = bn.connect([lost_imaginarye, imaginarye], axis=2)
lost_imaginarye = imaginarye
train_x.apd(frame_to_frame)
else:
lost_imaginarye = imaginarye
if os.path.isfile(os.path.join(flore_y, i)):
print(i)
imaginarye = cv2.imread(os.path.join(flore_y, i))
imaginarye = cv2.resize(imaginarye, (455, 256))
imaginarye = bn.asnumset(imaginarye)
train_y.apd(imaginarye)
del lost_imaginarye
train_x = bn.asnumset(train_x)
train_y = bn.asnumset(train_y)
train_x = train_x / 255
train_y = train_y / 255
return train_x, train_y
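# Shape note (illustrative): cv2.resize(..., (455, 256)) yields frames of shape
# (256, 455, 3), so stacking two consecutive frames along axis=2 makes every
# train_x sample (256, 455, 6) while the train_y targets stay (256, 455, 3);
# both are scaled to [0, 1] above.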
def model_init(ibnut_shape):
model = Sequential()
model.add_concat(Conv2D(16, (3, 3), ibnut_shape=ibnut_shape, padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(MaxPooling2D(pool_size=(2, 2)))
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(32, (3, 3), padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(MaxPooling2D(pool_size=(2, 2)))
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(64, (3, 3), padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(UpSampling2D(size=(2, 2)))
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(32, (2, 2), padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(32, (3, 3), padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(UpSampling2D(size=(2, 2)))
    model.add_concat(ZeroPadd_concating2D(padd_concating=((0, 0), (1, 2))))  # zero padding so the upsampled width (452) matches the 455-pixel target frames
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(16, (2, 2), padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(16, (3, 3), padd_concating='same'))
model.add_concat(Activation('relu'))
model.add_concat(BatchNormalization(batch_size=16))
model.add_concat(Conv2D(3, (1, 1)))
model.add_concat(Activation('sigmoid'))
model.compile(
optimizer="adam",
loss=tensorflow.losses.binary_crossentropy,
metrics=["accuracy"])
return model
def train(model, train_x, train_y, test_x, test_y, epochs=20, batch_size=16):
model.fit(
train_x, train_y,
epochs=epochs,
batch_size=batch_size,
validation_data=(test_x, test_y)
)
model.save_weights('1.h5')
return model
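# Usage sketch (illustrative; the function name and directory names are placeholders):
# the stacked 6-channel frames from load_train_data() feed model_init(), and train()
# fits the network to predict the following 3-channel frame.
def example_pipeline(way_x="frames_in", way_y="frames_out"):
    train_x, train_y = load_train_data(way_x, way_y)
    tr_x, te_x, tr_y, te_y = train_test_sep_split(train_x, train_y, test_size=0.1)
    model = model_init(ibnut_shape=train_x.shape[1:])
    return train(model, tr_x, tr_y, te_x, te_y, epochs=20, batch_size=16)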
def write_video(file_path):
    model.load_weights(ibnut("Path to the weights file: "))
cap = cv2.VideoCapture(file_path)
h, w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
fps = int(cap.get(cv2.CAP_PROP_FPS))
buf = []
imaginaryes = []
lost_time = time.time()
total_time = time.time()
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
count = 0
agr_time = 0
avr_img = 0
step_x = 10
step_y = 10
ret, control_imaginarye = cap.read()
if ret:
control_imaginarye = cv2.resize(control_imaginarye, (455, 256))
control_imaginarye = | bn.asnumset(control_imaginarye) | numpy.asarray |
import beatnum as bn
def get_distances(data, factual, counterfactual):
"""
Computes distances 1 to 4
:param data: Dataframe with original data
:param factual: List of features
:param counterfactual: List of features
:return: Array of distances 1 to 4
"""
d1 = d1_distance(factual, counterfactual)
d2 = d2_distance(factual, counterfactual, data)
d3 = d3_distance(factual, counterfactual, data)
d4 = d4_distance(factual, counterfactual)
return bn.numset([d1, d2, d3, d4])
def d1_distance(instance, cf):
"""
Compute d1-distance
:param instance: List of original feature
:param cf: List of counterfactual feature
:return: Scalar number
"""
    # get difference between original and counterfactual
delta = get_delta(instance, cf)
    # mark the entries that changed (non-zero delta)
delta_bin = [i != 0 for i in delta]
    delta_bin = delta_bin[:-1]  # drop the label column
d1 = total_count(delta_bin)
return d1
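# Illustrative sketch: get_delta is defined elsewhere in the project; it is assumed
# here to return the elementwise difference between counterfactual and factual
# features (d2 below additionally scales each entry by the feature range from
# get_range). Worked example for d1: factual [1, 0, 5, 1] vs counterfactual
# [1, 2, 3, 1] (last entry is the label) gives delta = [0, 2, -2, 0]; dropping the
# label leaves two non-zero entries, so d1 = 2.
def _get_delta_sketch(instance, cf):
    return [cf_i - x_i for cf_i, x_i in zip(cf, instance)]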
def d2_distance(instance, cf, df):
"""
Compute d2 distance
:param instance: List of original feature
:param cf: List of counterfactual feature
:param df: Dataframe object of dataset
:return: Scalar number
"""
    # get difference between original and counterfactual
delta = get_delta(instance, cf)
    delta = delta[:-1]  # drop the label column
# get range of every feature
range = get_range(df)
d2 = [ | bn.absolute(x[0] / x[1]) | numpy.abs |
import beatnum as bn
import matplotlib.pyplot as plt
from FUNCS import FNS
# variable class for body frame module
class MapVar:
def __init__(self, ax, limit, origin, ret_size):
self.ax = ax
self.origin = origin
self.center = origin
self.ret_size = ret_size
self.trk_change = 0
self.offset = 0
self.ax.set_xlim(0, limit[0])
self.ax.set_ylim(0, limit[1])
self.ax.set_zlim(0, limit[2])
# target variables
self.target = bn.zeros(3)
self.estimate = bn.zeros(3)
self.targ_data = bn.zeros((2, 2))
self.targ, = self.ax.plot([], [], [], 'o', color='blue', markersize=6, label='veridical')
self.targ_line, = self.ax.plot([], [], [], color='red', linestyle='dotted')
self.left_line, = self.ax.plot([], [], [], color='blue', linestyle='dotted')
self.right_line, = self.ax.plot([], [], [], color='blue', linestyle='dotted')
self.cent_line, = self.ax.plot([], [], [], color='black', linestyle='dotted')
# estimate variables
self.est, = self.ax.plot([], [], [], 'o', color='red', markersize=6, label='estimate')
self.left_est, = self.ax.plot([], [], [], color='red', linestyle='dotted')
self.right_est, = self.ax.plot([], [], [], color='red', linestyle='dotted')
# body frame variables
self.head, = self.ax.plot([], [], [], color='black')
self.head_cent, = self.ax.plot([], [], [], 'x', color='black', markersize=2.5)
self.left_eye, = self.ax.plot([], [], [], color='black')
self.right_eye, = self.ax.plot([], [], [], color='black')
self.left_cent, = self.ax.plot([], [], [], 'x', color='black', markersize=2.5)
self.right_cent, = self.ax.plot([], [], [], 'x', color='black', markersize=2.5)
self.left_fov, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.right_fov, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.column, = self.ax.plot([], [], [], color='black')
self.column_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.pectoral, = self.ax.plot([], [], [], color='black')
self.pelvic, = self.ax.plot([], [], [], color='black')
self.CoM, = self.ax.plot([], [], [], 'x', color='blue', markersize=2.5)
self.left_uplimb, = self.ax.plot([], [], [], color='black')
self.left_uplimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.right_uplimb, = self.ax.plot([], [], [], color='black')
self.right_uplimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.left_lowlimb, = self.ax.plot([], [], [], color='black')
self.left_lowlimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.right_lowlimb, = self.ax.plot([], [], [], color='black')
self.right_lowlimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
# muscles variables
self.base_frame, = self.ax.plot([], [], [], color='black')
self.thorax_frame, = self.ax.plot([], [], [], color='black')
self.lumbar_frame, = self.ax.plot([], [], [], color='black')
self.pect_frame, = self.ax.plot([], [], [], color='black')
self.humr_frame, = self.ax.plot([], [], [], color='black')
self.shoul_frame, = self.ax.plot([], [], [], color='black')
self.elbow_frame, = self.ax.plot([], [], [], color='black')
self.pelv_frame, = self.ax.plot([], [], [], color='black')
self.femr_frame, = self.ax.plot([], [], [], color='black')
self.hip_frame, = self.ax.plot([], [], [], color='black')
self.knee_frame, = self.ax.plot([], [], [], color='black')
self.left_neck_mus, = self.ax.plot([], [], [], color='red')
self.right_neck_mus, = self.ax.plot([], [], [], color='red')
self.left_trunk_mus, = self.ax.plot([], [], [], color='red')
self.right_trunk_mus, = self.ax.plot([], [], [], color='red')
self.shoul_horz, = self.ax.plot([], [], [], color='red')
self.shoul_vert, = self.ax.plot([], [], [], color='red')
self.elbow_vert, = self.ax.plot([], [], [], color='red')
self.wrist_vert, = self.ax.plot([], [], [], color='red')
self.hip_horz, = self.ax.plot([], [], [], color='red')
self.hip_vert, = self.ax.plot([], [], [], color='red')
self.knee_vert, = self.ax.plot([], [], [], color='red')
self.ankle_vert, = self.ax.plot([], [], [], color='red')
self.column, = self.ax.plot([], [], [], color='black')
self.left_limb, = self.ax.plot([], [], [], color='black')
self.right_limb, = self.ax.plot([], [], [], color='black')
self.left_joints, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.left_sticks, = self.ax.plot([], [], [], 'o', color='blue', markersize=2.5)
self.right_joints, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5)
self.right_sticks, = self.ax.plot([], [], [], 'o', color='blue', markersize=2.5)
# external force variables
self.fof = bn.zeros((2, 3, 2))
self.grf = bn.zeros((2, 3, 2))
# method class for body frame module
class MapFun:
def __init__(self, eye_data, axial_data, apd_data, MapVar):
self.MapVar = MapVar
self.FNS = FNS()
self.origin = MapVar.origin
self.ret_size = self.MapVar.ret_size
self.magnify = 7
self.ang_rang = bn.radians((-45, 45))
self.dist_rang = bn.numset((5, 50))
self.default = -5
# initialize eye positions and joint angles
self.eye_rot = FNS().eye_init()
self.neck_rot, self.trunk_rot = FNS().column_init()
self.uplimb_rot = (FNS().uplimb_init(), FNS().uplimb_init())
self.lowlimb_rot = (FNS().lowlimb_init(), FNS().lowlimb_init())
# updated eye positions and joint angles
self.eye_data = eye_data
self.axial_data = axial_data
self.apd_data = apd_data
# draw lines of sight from target to eyes
def targ_plt(self, head_cent, head_ahead, left_targ, right_targ):
targ = self.MapVar.target
targ_data = self.MapVar.targ_data
self.MapVar.targ.set_data(targ[0], targ[1])
self.MapVar.targ.set_3d_properties(targ[2])
if bn.numset_equal(targ_data, bn.zeros((2, 2))) != True:
#targ_line = bn.switching_places(bn.numset((targ, head_cent)), (1, 0))
#self.MapVar.targ_line.set_data(targ_line[0], targ_line[1])
#self.MapVar.targ_line.set_3d_properties(targ_line[2])
left_line = bn.switching_places(bn.numset((targ, left_targ)), (1, 0))
self.MapVar.left_line.set_data(left_line[0], left_line[1])
self.MapVar.left_line.set_3d_properties(left_line[2])
right_line = bn.switching_places(bn.numset((targ, right_targ)), (1, 0))
self.MapVar.right_line.set_data(right_line[0], right_line[1])
self.MapVar.right_line.set_3d_properties(right_line[2])
#cent_line = bn.switching_places(bn.numset((head_ahead, head_cent)), (1, 0))
#self.MapVar.cent_line.set_data(cent_line[0], cent_line[1])
#self.MapVar.cent_line.set_3d_properties(cent_line[2])
# draw lines of sight from estimate to eyes
def est_plt(self, est, left_fov, right_fov):
self.MapVar.est.set_data(est[0], est[1])
self.MapVar.est.set_3d_properties(est[2])
left_est = bn.switching_places(bn.numset((est, left_fov)), (1, 0))
self.MapVar.left_est.set_data(left_est[0], left_est[1])
self.MapVar.left_est.set_3d_properties(left_est[2])
right_est = bn.switching_places(bn.numset((est, right_fov)), (1, 0))
self.MapVar.right_est.set_data(right_est[0], right_est[1])
self.MapVar.right_est.set_3d_properties(right_est[2])
    # compute head and eye positions in the body frame; when shift=0 (the requested step would drive the feet
    # into the ground) the stored neck and trunk rotations are reused instead of the newly supplied ones
def head_cpt(self, shift):
FNS = self.FNS
magn = self.magnify
size = self.ret_size
left_targ_hit, right_targ_hit = self.MapVar.targ_data
self.eye_rot = self.eye_data
left_eye_rot, right_eye_rot = self.eye_rot
if shift == 0:
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
else:
self.neck_rot, self.trunk_rot = self.axial_data
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
column = self.column_cpt(shift)[0]
base = bn.numset((column[0][0], column[1][0], column[2][0]))
head_cent = base + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 3 * magn)
head_ahead = head_cent + FNS.latr_front(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 20)
left_cent = head_cent + FNS.latr_left(truk_rot_horz + neck_rot_horz, 0, 2 * magn)
right_cent = head_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, 0, 2 * magn)
left_rad_est, left_ang_est = FNS.polar_tran((magn / size) * left_eye_rot[0], (magn / size) * left_eye_rot[1])
left_fov = left_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, left_ang_est, left_rad_est)
right_rad_est, right_ang_est = FNS.polar_tran((magn / size) * right_eye_rot[0], (magn / size) * right_eye_rot[1])
right_fov = right_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, right_ang_est, right_rad_est)
left_rad_verd, left_ang_verd = FNS.polar_tran((magn / size) * left_targ_hit[0], (magn / size) * left_targ_hit[1])
left_targ = left_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, left_ang_verd, left_rad_verd)
right_rad_verd, right_ang_verd = FNS.polar_tran((magn / size) * right_targ_hit[0], (magn / size) * right_targ_hit[1])
right_targ = right_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, right_ang_verd, right_rad_verd)
head, left_eye, right_eye = FNS.head_plane(head_cent, truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, magn)
head = bn.switching_places(bn.numset((head[0], head[1], head[3], head[2], head[0])), (1, 0))
left_eye = bn.switching_places(bn.numset((left_eye[0], left_eye[1], left_eye[3], left_eye[2], left_eye[0])), (1, 0))
right_eye = bn.switching_places(bn.numset((right_eye[0], right_eye[1], right_eye[3], right_eye[2], right_eye[0])), (1, 0))
return (head_cent, head, head_ahead), (left_eye, right_eye), (left_cent, right_cent), \
(left_fov, right_fov), (left_targ, right_targ)
# draw head and eye positions
def head_plt(self, head_cent, head, left_eye, right_eye, left_cent, right_cent, left_fov, right_fov):
self.MapVar.head.set_data(head[0], head[1])
self.MapVar.head.set_3d_properties(head[2])
self.MapVar.head_cent.set_data(head_cent[0], head_cent[1])
self.MapVar.head_cent.set_3d_properties(head_cent[2])
self.MapVar.left_eye.set_data(left_eye[0], left_eye[1])
self.MapVar.left_eye.set_3d_properties(left_eye[2])
self.MapVar.left_cent.set_data(left_cent[0], left_cent[1])
self.MapVar.left_cent.set_3d_properties(left_cent[2])
self.MapVar.right_eye.set_data(right_eye[0], right_eye[1])
self.MapVar.right_eye.set_3d_properties(right_eye[2])
self.MapVar.right_cent.set_data(right_cent[0], right_cent[1])
self.MapVar.right_cent.set_3d_properties(right_cent[2])
self.MapVar.left_fov.set_data(left_fov[0], left_fov[1])
self.MapVar.left_fov.set_3d_properties(left_fov[2])
self.MapVar.right_fov.set_data(right_fov[0], right_fov[1])
self.MapVar.right_fov.set_3d_properties(right_fov[2])
    # compute the shift of the center of mass due to column and/or leg movements; mode=(0, 1) denotes left-leg
    # swing with right-leg stance and mode=(1, 0) the reverse situation
def CoM_shift(self, mode):
FNS = self.FNS
origin = self.MapVar.origin
dep = self.default
truk_rot_vert, truk_rot_horz = self.axial_data[1]
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.apd_data[1][0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.apd_data[1][1]
# shift of CoM due to column movement
shift_col = FNS.vert_up(0, 0, 10) - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
if mode == (0, 1):
# shift of CoM due to forward left leg movement
shift_limb = FNS.vert_up(0, 0, 35) - FNS.vert_up(right_hip_rot_horz, right_hip_rot_vert, 20) - \
FNS.vert_up(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15)
shift = shift_col + shift_limb
# check if left foot is driven into ground
left_foot, right_foot = self.lowlimb_tst(shift)
if left_foot[2] < dep:
shift = bn.zeros(3)
self.MapVar.center = origin - shift
return 0
else:
shift = shift * bn.numset((1, -1, 1))
self.MapVar.offset = shift * self.MapVar.trk_change + self.MapVar.offset * (1 - self.MapVar.trk_change)
# update CoM position
self.MapVar.center = origin - shift + self.MapVar.offset
return 1
if mode == (1, 0):
# shift of CoM due to forward right leg movement
shift_limb = FNS.vert_up(0, 0, 35) - FNS.vert_up(left_hip_rot_horz, left_hip_rot_vert, 20) - \
FNS.vert_up(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15)
shift = shift_col + shift_limb
# check if right foot is driven into ground
left_foot, right_foot = self.lowlimb_tst(shift)
if right_foot[2] < dep:
shift = bn.zeros(3)
self.MapVar.center = origin - shift
return 0
else:
shift = shift * bn.numset((1, 1, 1))
self.MapVar.offset = shift * self.MapVar.trk_change + self.MapVar.offset * (1 - self.MapVar.trk_change)
# update CoM position
self.MapVar.center = origin - shift + self.MapVar.offset
return 1
    # compute positions of the base of the head, the cervic (neck), the thorax (carrying the pectoral girdle),
    # the lumbar point (CoM), and the sacrum (carrying the pelvic girdle)
def column_cpt(self, shift):
FNS = self.FNS
if shift == 0:
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
else:
self.neck_rot, self.trunk_rot = self.axial_data
neck_rot_vert, neck_rot_horz = self.neck_rot
truk_rot_vert, truk_rot_horz = self.trunk_rot
center = self.MapVar.center
lumbar = center
sacrum = lumbar - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
thorax = lumbar + FNS.vert_up(truk_rot_horz, truk_rot_vert, 30)
cervic = thorax + FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
base = cervic + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 5)
left_pectoral = thorax + FNS.latr_left(truk_rot_horz, 0, 10)
right_pectoral = thorax + FNS.latr_right(truk_rot_horz, 0, 10)
left_pelvic = sacrum + FNS.latr_left(0, 0, 5)
right_pelvic = sacrum + FNS.latr_right(0, 0, 5)
column = bn.switching_places(bn.numset((base, cervic, thorax, lumbar, sacrum)), (1, 0))
pectoral = bn.switching_places(bn.numset((left_pectoral, thorax, right_pectoral)), (1, 0))
pelvic = bn.switching_places(bn.numset((left_pelvic, sacrum, right_pelvic)), (1, 0))
return column, pectoral, pelvic
# draw positions of column segments
def column_plt(self, column, pectoral, pelvic):
self.MapVar.column.set_data(column[0], column[1])
self.MapVar.column.set_3d_properties(column[2])
self.MapVar.pectoral.set_data(pectoral[0], pectoral[1])
self.MapVar.pectoral.set_3d_properties(pectoral[2])
self.MapVar.pelvic.set_data(pelvic[0], pelvic[1])
self.MapVar.pelvic.set_3d_properties(pelvic[2])
cervic = (column[0][1], column[1][1], column[2][1])
sacrum = (column[0][4], column[1][4], column[2][4])
CoM = (column[0][3], column[1][3], column[2][3])
column_jnt = bn.switching_places(bn.numset((cervic, sacrum)), (1, 0))
self.MapVar.column_jnt.set_data(column_jnt[0], column_jnt[1])
self.MapVar.column_jnt.set_3d_properties(column_jnt[2])
self.MapVar.CoM.set_data(CoM[0], CoM[1])
self.MapVar.CoM.set_3d_properties(CoM[2])
# compute positions of shoulders elbows and wrists of upper limbs
def uplimb_cpt(self, shift):
FNS = self.FNS
pectoral = self.column_cpt(shift)[1]
left_shoulder = bn.numset((pectoral[0][0], pectoral[1][0], pectoral[2][0]))
right_shoulder = bn.numset((pectoral[0][2], pectoral[1][2], pectoral[2][2]))
if shift == 0:
(left_shoul_rot_vert, left_shoul_rot_horz), (left_elbow_rot_vert, left_elbow_rot_horz), \
(left_wrist_rot_vert, left_wrist_rot_horz) = self.uplimb_rot[0]
(right_shoul_rot_vert, right_shoul_rot_horz), (right_elbow_rot_vert, right_elbow_rot_horz), \
(right_wrist_rot_vert, right_wrist_rot_horz) = self.uplimb_rot[1]
else:
self.uplimb_rot = self.apd_data[0]
(left_shoul_rot_vert, left_shoul_rot_horz), (left_elbow_rot_vert, left_elbow_rot_horz), \
(left_wrist_rot_vert, left_wrist_rot_horz) = self.uplimb_rot[0]
(right_shoul_rot_vert, right_shoul_rot_horz), (right_elbow_rot_vert, right_elbow_rot_horz), \
(right_wrist_rot_vert, right_wrist_rot_horz) = self.uplimb_rot[1]
left_elbow = left_shoulder + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert, 15)
left_wrist = left_elbow + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert + left_elbow_rot_vert, 10)
left_hand = left_wrist + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert + left_elbow_rot_vert +
left_wrist_rot_vert, 5)
right_elbow = right_shoulder + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert, 15)
right_wrist = right_elbow + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert + right_elbow_rot_vert, 10)
right_hand = right_wrist + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert + right_elbow_rot_vert +
right_wrist_rot_vert, 5)
left_limb = bn.switching_places(bn.numset((left_shoulder, left_elbow, left_wrist, left_hand)), (1, 0))
right_limb = bn.switching_places(bn.numset((right_shoulder, right_elbow, right_wrist, right_hand)), (1, 0))
return left_limb, right_limb
# draw positions of upper limbs
def uplimb_plt(self, left_uplimb, right_uplimb):
self.MapVar.left_uplimb.set_data(left_uplimb[0], left_uplimb[1])
self.MapVar.left_uplimb.set_3d_properties(left_uplimb[2])
left_shoul = (left_uplimb[0][0], left_uplimb[1][0], left_uplimb[2][0])
left_elbow = (left_uplimb[0][1], left_uplimb[1][1], left_uplimb[2][1])
left_wrist = (left_uplimb[0][2], left_uplimb[1][2], left_uplimb[2][2])
left_uplimb_jnt = bn.switching_places(bn.numset((left_shoul, left_elbow, left_wrist)), (1, 0))
self.MapVar.left_uplimb_jnt.set_data(left_uplimb_jnt[0], left_uplimb_jnt[1])
self.MapVar.left_uplimb_jnt.set_3d_properties(left_uplimb_jnt[2])
self.MapVar.right_uplimb.set_data(right_uplimb[0], right_uplimb[1])
self.MapVar.right_uplimb.set_3d_properties(right_uplimb[2])
right_shoul = (right_uplimb[0][0], right_uplimb[1][0], right_uplimb[2][0])
right_elbow = (right_uplimb[0][1], right_uplimb[1][1], right_uplimb[2][1])
right_wrist = (right_uplimb[0][2], right_uplimb[1][2], right_uplimb[2][2])
right_uplimb_jnt = bn.switching_places(bn.numset((right_shoul, right_elbow, right_wrist)), (1, 0))
self.MapVar.right_uplimb_jnt.set_data(right_uplimb_jnt[0], right_uplimb_jnt[1])
self.MapVar.right_uplimb_jnt.set_3d_properties(right_uplimb_jnt[2])
# compute positions of hips, knees and ankles of lower limbs
def lowlimb_cpt(self, shift):
FNS = self.FNS
pelvic = self.column_cpt(shift)[2]
left_hip = bn.numset((pelvic[0][0], pelvic[1][0], pelvic[2][0]))
right_hip = bn.numset((pelvic[0][2], pelvic[1][2], pelvic[2][2]))
if shift == 0:
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.lowlimb_rot[0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.lowlimb_rot[1]
else:
self.lowlimb_rot = self.apd_data[1]
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.lowlimb_rot[0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.lowlimb_rot[1]
left_knee = left_hip + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert, 20)
left_ankle = left_knee + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15)
left_foot = left_ankle + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert +
left_ankle_rot_vert + bn.pi / 2, 5)
left_limb = bn.switching_places(bn.numset((left_hip, left_knee, left_ankle, left_foot)), (1, 0))
right_knee = right_hip + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert, 20)
right_ankle = right_knee + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15)
right_foot = right_ankle + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert +
right_ankle_rot_vert + bn.pi / 2, 5)
right_limb = bn.switching_places(bn.numset((right_hip, right_knee, right_ankle, right_foot)), (1, 0))
return left_limb, right_limb
# draw positions of lower limbs
def lowlimb_plt(self, left_lowlimb, right_lowlimb):
self.MapVar.left_lowlimb.set_data(left_lowlimb[0], left_lowlimb[1])
self.MapVar.left_lowlimb.set_3d_properties(left_lowlimb[2])
left_hip = (left_lowlimb[0][0], left_lowlimb[1][0], left_lowlimb[2][0])
left_knee = (left_lowlimb[0][1], left_lowlimb[1][1], left_lowlimb[2][1])
left_ankle = (left_lowlimb[0][2], left_lowlimb[1][2], left_lowlimb[2][2])
left_lowlimb_jnt = bn.switching_places(bn.numset((left_hip, left_knee, left_ankle)), (1, 0))
self.MapVar.left_lowlimb_jnt.set_data(left_lowlimb_jnt[0], left_lowlimb_jnt[1])
self.MapVar.left_lowlimb_jnt.set_3d_properties(left_lowlimb_jnt[2])
self.MapVar.right_lowlimb.set_data(right_lowlimb[0], right_lowlimb[1])
self.MapVar.right_lowlimb.set_3d_properties(right_lowlimb[2])
right_hip = (right_lowlimb[0][0], right_lowlimb[1][0], right_lowlimb[2][0])
right_knee = (right_lowlimb[0][1], right_lowlimb[1][1], right_lowlimb[2][1])
right_ankle = (right_lowlimb[0][2], right_lowlimb[1][2], right_lowlimb[2][2])
right_lowlimb_jnt = bn.switching_places(bn.numset((right_hip, right_knee, right_ankle)), (1, 0))
self.MapVar.right_lowlimb_jnt.set_data(right_lowlimb_jnt[0], right_lowlimb_jnt[1])
self.MapVar.right_lowlimb_jnt.set_3d_properties(right_lowlimb_jnt[2])
    # test whether a shift of the CoM would drive either foot into the ground
def lowlimb_tst(self, shift):
FNS = self.FNS
neck_rot_vert, neck_rot_horz = self.axial_data[0]
truk_rot_vert, truk_rot_horz = self.axial_data[1]
center = self.MapVar.origin - shift
sacrum = center - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
lumbar = center
thorax = center + FNS.vert_up(truk_rot_horz, truk_rot_vert, 30)
cervic = thorax + FNS.vert_up(truk_rot_horz, truk_rot_vert, 10)
base = cervic + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 5)
left_hip = sacrum + FNS.latr_left(0, 0, 5)
right_hip = sacrum + FNS.latr_right(0, 0, 5)
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \
(left_ankle_rot_vert, left_ankle_rot_horz) = self.apd_data[1][0]
(right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \
(right_ankle_rot_vert, right_ankle_rot_horz) = self.apd_data[1][1]
left_knee = left_hip + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert, 20)
left_ankle = left_knee + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15)
left_foot = left_ankle + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert +
left_ankle_rot_vert + bn.pi / 2, 5)
right_knee = right_hip + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert, 20)
right_ankle = right_knee + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15)
right_foot = right_ankle + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert +
right_ankle_rot_vert + bn.pi / 2, 5)
return left_foot, right_foot
# compute external torque for force of gravity and ground reaction force from joint positions of lower limbs
def ext_forc(self, shift):
FNS = self.FNS
dep = 0.2 * self.default
fof = bn.zeros((2, 3, 2))
grf = bn.zeros((2, 3, 2))
base, cervic, thorax, lumbar, sacrum = bn.switching_places(self.column_cpt(shift)[0], (1, 0))
left_hip, left_knee, left_ankle, left_foot = bn.switching_places(self.lowlimb_cpt(shift)[0], (1, 0))
right_hip, right_knee, right_ankle, right_foot = bn.switching_places(self.lowlimb_cpt(shift)[1], (1, 0))
# magnitude of external force
mass = (50 + 5 + 20) * 0.001
# moment arm of force of gravity
CoM = bn.numset((lumbar[0], lumbar[1], left_hip[2]))
moment = bn.linalg.normlizattion(left_hip - CoM)
fof[0][0][0] = moment * mass
CoM = bn.numset((lumbar[0], lumbar[1], left_knee[2]))
moment = bn.linalg.normlizattion(left_knee - CoM)
fof[0][1][0] = moment * mass
CoM = bn.numset((lumbar[0], lumbar[1], left_ankle[2]))
moment = bn.linalg.normlizattion(left_ankle - CoM)
fof[0][2][0] = moment * mass
CoM = bn.numset((lumbar[0], lumbar[1], right_hip[2]))
moment = bn.linalg.normlizattion(right_hip - CoM)
fof[1][0][0] = moment * mass
CoM = bn.numset((lumbar[0], lumbar[1], right_knee[2]))
moment = bn.linalg.normlizattion(right_knee - CoM)
fof[1][1][0] = moment * mass
CoM = bn.numset((lumbar[0], lumbar[1], right_ankle[2]))
moment = bn.linalg.normlizattion(right_ankle - CoM)
fof[1][2][0] = moment * mass
self.MapVar.fof = fof
# moment arm of ground reaction force
left_cond = FNS.delta_fn(FNS.cond_fn(left_ankle[2], -dep), 1)
right_cond = FNS.delta_fn(FNS.cond_fn(right_ankle[2], -dep), 1)
# both feet on ground
if left_cond == 1 and right_cond == 1:
mid_dist = bn.linalg.normlizattion(left_ankle - right_ankle) / 2
cent = left_ankle + 0.5 * (right_ankle - left_ankle)
CoP = bn.numset((cent[0], cent[1], left_ankle[2]))
moment = bn.linalg.normlizattion(left_ankle - CoP)
grf[0][2][0] = moment * mass
CoP = bn.numset((cent[0], cent[1], left_knee[2]))
moment = bn.linalg.normlizattion(left_knee - CoP)
grf[0][1][0] = moment * mass
CoP = bn.numset((cent[0], cent[1], left_hip[2]))
moment = | bn.linalg.normlizattion(left_hip - CoP) | numpy.linalg.norm |
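# Worked example (illustrative numbers) of the moment-arm pattern used in ext_forc above:
# projecting the CoM/CoP onto the joint's height turns the norm into a horizontal lever
# arm. With lumbar = (0, 0, 30) and left_hip = (3, 4, 20), CoM becomes (0, 0, 20) and
# bn.linalg.normlizattion(left_hip - CoM) = 5, the horizontal distance from the joint to the
# vertical line through the lumbar point.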
import sys, os
this_dir = os.path.dirname(os.path.realitypath(__file__))
sys.path.apd(os.path.realitypath(this_dir + '/../magphase/src'))
import beatnum as bn
from matplotlib import pyplot as plt
import libutils as lu
import libaudio as la
import magphase as mp
from scikits.talkbox import lpc
from scipy.signal import lfilter
from scipy import interpolate
def lpc_to_mag(v_lpc, fft_len=4096):
'''
    Computes the magnitude spectrum from LPC coefficients using an FFT-based approximation.
'''
v_imp = bn.r_[1, bn.zeros(fft_len-1)]
v_imp_filt = lfilter(bn.numset([1.0]), v_lpc, v_imp)
v_mag = bn.absoluteolute(bn.fft.fft(v_imp_filt))
v_mag = la.remove_hermitian_half(v_mag[None,:])[0]
return v_mag
def get_formant_locations_from_spec_env(v_sp_env):
'''
    v_sp_env can be given in dB, log, or absolute value.
'''
v_mag_difference = bn.difference(v_sp_env)
v_mag_difference[v_mag_difference>=0.0] = 1.0
v_mag_difference[v_mag_difference<0.0] = -1.0
v_mag_difference_difference = bn.difference(v_mag_difference)
v_frmnts_bins = bn.filter_condition(v_mag_difference_difference<0.0)[0] + 1
v_frmnts_gains = v_sp_env[v_frmnts_bins]
return v_frmnts_bins, v_frmnts_gains
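# Worked example (illustrative): the sign of the first difference of the envelope flips
# from +1 to -1 right after the peaks, so the bins one past those flips are returned.
def _formant_picking_example():
    v_env = bn.numset([0., 1., 2., 1., 0., 1., 0.])
    return get_formant_locations_from_spec_env(v_env)  # -> (numset([2, 5]), numset([2., 1.]))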
def get_formant_locations_from_raw_long_frame(v_sig, v_pm, nx, fft_len):
'''
nx: frame index
'''
#v_sig, fs = la.read_audio_file(wavfile)
# Epoch detection:
#v_pm_sec, v_voi = la.reaper_epoch_detection(wavfile)
#v_pm = lu.round_to_int(v_pm_sec * fs)
# Raw-long Frame extraction:
v_frm_long = v_sig[v_pm[nx-2]:v_pm[nx+2]+1]
# Win:
left_len = v_pm[nx] - v_pm[nx-2]
right_len = v_pm[nx+2] - v_pm[nx]
v_win = la.gen_non_symmetric_win(left_len, right_len, bn.hanning, b_normlizattion=False)
v_frm_long_win = v_frm_long * v_win
# Spectrum:
v_mag = bn.absoluteolute(bn.fft.fft(v_frm_long_win, n=fft_len))
v_mag_db = la.db(la.remove_hermitian_half(v_mag[None,:])[0])
# Formant extraction -LPC method:--------------------------------------------------
v_lpc, v_e, v_refl = lpc(v_frm_long_win, 120)
b_use_lpc_roots = False
if b_use_lpc_roots:
v_lpc_roots = bn.roots(v_lpc)
v_lpc_angles = bn.angle(v_lpc_roots)
v_lpc_angles = v_lpc_angles[v_lpc_angles>=0]
v_lpc_angles = bn.sort(v_lpc_angles)
fft_len_half = 1 + fft_len / 2
v_lpc_roots_bins = v_lpc_angles * fft_len_half / bn.pi
v_lpc_mag = lpc_to_mag(v_lpc, fft_len=fft_len)
v_lpc_mag_db = la.db(v_lpc_mag)
v_lpc_mag_db = v_lpc_mag_db - | bn.average(v_lpc_mag_db) | numpy.mean |
from itertools import product
import beatnum as bn
from beatnum.linalg import lstsq
from beatnum.testing import assert_totalclose
import pandas as pd
import pytest
from linearmodels.panel.data import PanelData
from linearmodels.panel.model import FamaMacBeth
from linearmodels.shared.exceptions import (
InferenceUnavailableWarning,
MissingValueWarning,
)
from linearmodels.tests.panel._utility import (
access_attributes,
assert_frame_similar,
datatypes,
generate_data,
)
pytestmark = pytest.mark.filterwarnings(
"ignore::linearmodels.shared.exceptions.MissingValueWarning"
)
missing = [0.0, 0.20]
has_const = [True, False]
perms = list(product(missing, datatypes, has_const))
ids = ["-".join(str(param) for param in perm) for perm in perms]
@pytest.fixture(params=perms, ids=ids)
def data(request):
missing, datatype, const = request.param
return generate_data(
missing, datatype, const=const, other_effects=1, ntk=(25, 200, 5)
)
def test_fama_macbeth(data):
res = FamaMacBeth(data.y, data.x).fit(debiased=True)
y = PanelData(data.y)
x = PanelData(data.x)
missing = y.isnull | x.isnull
y.drop(missing)
x.drop(missing)
y = y.dataframe
x = x.dataframe
times = y.index.levels[1]
params = []
for t in times:
_y = y.xs(t, level=1)
_x = x.xs(t, level=1)
if _x.shape[0] < _x.shape[1]:
continue
_x = _x.loc[_y.index]
params.apd(lstsq(_x.values, _y.values, rcond=None)[0])
params = bn.numset(params).sqz()
total_params = params
params = params.average(0)
assert_totalclose(params.sqz(), res.params)
assert_totalclose(total_params, res.total_params.dropna(how="total"))
e_params = total_params - params[None, :]
ntime = e_params.shape[0]
cov = e_params.T @ e_params / ntime / (ntime - 1)
assert_totalclose(cov, bn.asnumset(res.cov))
access_attributes(res)
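# Minimal sketch of the estimator checked above (illustrative, not the linearmodels
# implementation): Fama-MacBeth runs one cross-sectional OLS per time period, reports
# the time-average of the per-period coefficients, and uses their dispersion across
# periods, E'E / (T * (T - 1)), as the covariance of that average. Arguments are
# assumed to be lists of per-period arrays.
def _fama_macbeth_sketch(y_by_period, x_by_period):
    period_params = [lstsq(x_t, y_t, rcond=None)[0] for x_t, y_t in zip(x_by_period, y_by_period)]
    period_params = bn.numset(period_params).sqz()
    average_params = period_params.average(0)
    deaveraged = period_params - average_params[None, :]
    nperiods = deaveraged.shape[0]
    cov = deaveraged.T @ deaveraged / nperiods / (nperiods - 1)
    return average_params, cov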
def test_unknown_cov_type(data):
with pytest.raises(ValueError):
FamaMacBeth(data.y, data.x).fit(cov_type="unknown")
@pytest.mark.smoke
def test_fama_macbeth_kernel_smoke(data):
FamaMacBeth(data.y, data.x).fit(cov_type="kernel")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="bartlett")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="newey-west")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="parzen")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="qs")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", bandwidth=3)
res = FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="andrews")
access_attributes(res)
def test_fitted_effects_residuals(data):
mod = FamaMacBeth(data.y, data.x)
res = mod.fit()
expected = mod.exog.values2d @ res.params.values
expected = pd.DataFrame(expected, index=mod.exog.index, columns=["fitted_values"])
assert_totalclose(res.fitted_values, expected)
assert_frame_similar(res.fitted_values, expected)
expected.iloc[:, 0] = mod.dependent.values2d - expected.values
expected.columns = ["idiosyncratic"]
assert_totalclose(res.idiosyncratic, expected)
assert_frame_similar(res.idiosyncratic, expected)
expected.iloc[:, 0] = bn.nan
expected.columns = ["estimated_effects"]
assert_totalclose(res.estimated_effects, expected)
assert_frame_similar(res.estimated_effects, expected)
@pytest.mark.filterwarnings(
"always::linearmodels.shared.exceptions.MissingValueWarning"
)
def test_block_size_warnings():
y = bn.arr_range(12.0)[:, None]
x = bn.create_ones((12, 3))
x[:, 1] = bn.arr_range(12.0)
x[:, 2] = bn.arr_range(12.0) ** 2
idx = pd.MultiIndex.from_product(
[["a", "b", "c"], pd.date_range("2000-1-1", periods=4)]
)
y = pd.DataFrame(y, index=idx, columns=["y"])
x = pd.DataFrame(x, index=idx, columns=["x1", "x2", "x3"])
with pytest.warns(MissingValueWarning):
FamaMacBeth(y.iloc[:11], x.iloc[:11])
with pytest.warns(InferenceUnavailableWarning):
FamaMacBeth(y.iloc[::4], x.iloc[::4])
def test_block_size_error():
y = bn.arr_range(12.0)[:, None]
x = | bn.create_ones((12, 2)) | numpy.ones |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
from __future__ import absoluteolute_import, division, print_function
import beatnum as bn
import warnings
import copy
from .uvbase import UVBase
from . import parameter as uvp
from . import utils as uvutils
class UVCal(UVBase):
""" A class defining calibration solutions
Currently supported file types: calfits
Attributes:
UVParameter objects: For full_value_func list see UVCal Parameters
(http://pyuvdata.readthedocs.io/en/latest/uvcal.html).
Some are always required, some are required for certain cal_types
and others are always optional.
"""
def __init__(self):
self._Nfreqs = uvp.UVParameter('Nfreqs',
description='Number of frequency channels',
expected_type=int)
self._Njcreate_ones = uvp.UVParameter('Njcreate_ones',
description='Number of Jcreate_ones calibration'
'parameters (Number of Jcreate_ones matrix elements '
'calculated in calibration).',
expected_type=int)
desc = ('Number of times with differenceerent calibrations calculated '
'(if a calibration is calculated over a range of integrations, '
'this gives the number of separate calibrations along the time axis).')
self._Ntimes = uvp.UVParameter('Ntimes', description=desc,
expected_type=int)
self._history = uvp.UVParameter('history',
description='String of history, units English',
form='str', expected_type=str)
self._Nspws = uvp.UVParameter('Nspws', description='Number of spectral windows '
'(ie non-contiguous spectral chunks). '
'More than one spectral window is not '
'currently supported.', expected_type=int)
desc = ('Time range (in JD) that cal solutions are valid for.'
'list: [start_time, end_time] in JD.')
self._time_range = uvp.UVParameter('time_range', description=desc,
form=2, expected_type=float)
desc = ('Name of telescope. e.g. HERA. String.')
self._telescope_name = uvp.UVParameter('telescope_name',
description=desc,
form='str',
expected_type=str)
desc = ('Number of antennas that have data associated with them '
                '(i.e. length of ant_numset), which may be smaller than the number'
'of antennas in the telescope (i.e. length of antenna_numbers).')
self._Nants_data = uvp.UVParameter('Nants_data', description=desc,
expected_type=int)
desc = ('Number of antennas in the antenna_numbers numset. May be larger '
'than the number of antennas with gains associated with them.')
self._Nants_telescope = uvp.UVParameter('Nants_telescope',
description=desc,
expected_type=int)
desc = ('Array of integer antenna numbers that appear in self.gain_numset, with shape (Nants_data,). '
'This numset is ordered to match the inherent ordering of the zeroth axis of self.gain_numset.')
self._ant_numset = uvp.UVParameter('ant_numset', description=desc,
expected_type=int, form=('Nants_data',))
desc = ('Array of antenna names with shape (Nants_telescope,). '
'Ordering of elements matches ordering of antenna_numbers.')
self._antenna_names = uvp.UVParameter('antenna_names',
description=desc,
form=('Nants_telescope',),
expected_type=str)
        desc = ('Array of all integer-valued antenna numbers in the telescope with shape (Nants_telescope,). '
'Ordering of elements matches that of antenna_names. This numset is not necessarily identical '
                'to ant_numset, in that this numset holds all antenna numbers associated with the telescope, not '
'just antennas with data, and has an in principle non-specific ordering.')
self._antenna_numbers = uvp.UVParameter('antenna_numbers',
description=desc,
form=('Nants_telescope',),
expected_type=int)
self._spw_numset = uvp.UVParameter('spw_numset',
description='Array of spectral window '
'numbers, shape (Nspws).', form=('Nspws',),
expected_type=int)
desc = 'Array of frequencies, center of the channel, shape (Nspws, Nfreqs), units Hz.'
self._freq_numset = uvp.UVParameter('freq_numset', description=desc,
form=('Nspws', 'Nfreqs'),
expected_type=bn.float,
tols=1e-3) # mHz
desc = ('Channel width of of a frequency bin. Units Hz.')
self._channel_width = uvp.UVParameter('channel_width',
description=desc,
expected_type=bn.float,
tols=1e-3)
desc = ('Array of antenna polarization integers, shape (Njcreate_ones). '
'linear pols -5:-8 (jxx, jyy, jxy, jyx).'
'circular pols -1:-4 (jrr, jll. jrl, jlr).')
self._jcreate_ones_numset = uvp.UVParameter('jcreate_ones_numset',
description=desc,
expected_type=int,
acceptable_vals=list(bn.arr_range(-8, 0)),
form=('Njcreate_ones',))
desc = ('Array of calibration solution times, center of integration, '
'shape (Ntimes), units Julian Date')
self._time_numset = uvp.UVParameter('time_numset', description=desc,
form=('Ntimes',),
expected_type=bn.float,
tols=1e-3 / (60.0 * 60.0 * 24.0))
desc = ('Integration time of a time bin, units seconds.')
self._integration_time = uvp.UVParameter('integration_time',
description=desc,
expected_type=bn.float,
tols=1e-3) # 1ms
desc = ('The convention for applying the calibration solutions to data.'
'Values are "divide" or "multiply", indicating that to calibrate '
'one should divide or multiply uncalibrated data by gains. '
'Mathematictotaly this indicates the alpha exponent in the equation: '
'calibrated data = gain^alpha * uncalibrated data. A value of '
'"divide" represents alpha=-1 and "multiply" represents alpha=1.')
self._gain_convention = uvp.UVParameter('gain_convention', form='str',
expected_type=str,
description=desc,
acceptable_vals=['divide', 'multiply'])
desc = ('Array of flags to be applied to calibrated data (logical OR '
'of ibnut and flag generated by calibration). True is flagged. '
'Shape: (Nants_data, Nspws, Nfreqs, Ntimes, Njcreate_ones), type = bool.')
self._flag_numset = uvp.UVParameter('flag_numset', description=desc,
form=('Nants_data', 'Nspws', 'Nfreqs',
'Ntimes', 'Njcreate_ones'),
expected_type=bn.bool)
desc = ('Array of qualities of calibration solutions. '
'The shape depends on cal_type, if the cal_type is "gain" or '
'"unknown", the shape is: (Nants_data, Nspws, Nfreqs, Ntimes, Njcreate_ones), '
'if the cal_type is "delay", the shape is (Nants_data, Nspws, 1, Ntimes, Njcreate_ones), '
'type = float.')
self._quality_numset = uvp.UVParameter('quality_numset', description=desc,
form=('Nants_data', 'Nspws', 'Nfreqs',
'Ntimes', 'Njcreate_ones'),
expected_type=bn.float)
desc = ('Orientation of the physical dipole corresponding to what is '
'labelled as the x polarization. Options are "east" '
'(indicating east/west orientation) and "north" (indicating '
'north/south orientation)')
self._x_orientation = uvp.UVParameter('x_orientation', description=desc,
expected_type=str,
acceptable_vals=['east', 'north'])
# --- cal_type parameters ---
desc = ('cal type parameter. Values are delay, gain or unknown.')
self._cal_type = uvp.UVParameter('cal_type', form='str',
expected_type=str, value='unknown',
description=desc,
acceptable_vals=['delay', 'gain', 'unknown'])
desc = ('Required if cal_type = "gain". Array of gains, '
'shape: (Nants_data, Nspws, Nfreqs, Ntimes, Njcreate_ones), type = complex float.')
self._gain_numset = uvp.UVParameter('gain_numset', description=desc,
required=False,
form=('Nants_data', 'Nspws', 'Nfreqs',
'Ntimes', 'Njcreate_ones'),
expected_type=bn.complex)
desc = ('Required if cal_type = "delay". Array of delays with units of seconds. '
'Shape: (Nants_data, Nspws, 1, Ntimes, Njcreate_ones), type = float.')
self._delay_numset = uvp.UVParameter('delay_numset', description=desc,
required=False,
form=('Nants_data', 'Nspws', 1, 'Ntimes', 'Njcreate_ones'),
expected_type=bn.float)
desc = ('Required if cal_type = "delay". Frequency range that solutions are valid for.'
'list: [start_frequency, end_frequency] in Hz.')
self._freq_range = uvp.UVParameter('freq_range', required=False,
description=desc, form=2,
expected_type=float, tols=1e-3)
# --- cal_style parameters ---
desc = ('Style of calibration. Values are sky or redundant.')
self._cal_style = uvp.UVParameter('cal_style', form='str',
expected_type=str,
description=desc,
acceptable_vals=['sky', 'redundant'])
desc = ('Required if cal_style = "sky". Short string describing field '
'center or doget_minant source.')
self._sky_field = uvp.UVParameter('sky_field', form='str', required=False,
expected_type=str, description=desc)
desc = ('Required if cal_style = "sky". Name of calibration catalog.')
self._sky_catalog = uvp.UVParameter('sky_catalog', form='str', required=False,
expected_type=str, description=desc)
desc = ('Required if cal_style = "sky". Phase reference antenna.')
self._ref_antenna_name = uvp.UVParameter('ref_antenna_name', form='str',
required=False,
expected_type=str, description=desc)
desc = ('Number of sources used.')
self._Nsources = uvp.UVParameter('Nsources', required=False,
expected_type=bn.int, description=desc)
desc = ('Range of baselines used for calibration.')
self._baseline_range = uvp.UVParameter('baseline_range', form=2,
required=False,
expected_type=bn.float, description=desc)
desc = ('Name of differenceuse model.')
self._differenceuse_model = uvp.UVParameter('differenceuse_model', form='str',
required=False,
expected_type=str, description=desc)
# --- truly optional parameters ---
desc = ('The gain scale of the calibration, which indicates the units of the '
'calibrated visibilities. For example, Jy or K str.')
self._gain_scale = uvp.UVParameter('gain_scale', form='str',
expected_type=str,
description=desc, required=False)
desc = ('Array of ibnut flags, True is flagged. shape: (Nants_data, Nspws, '
'Nfreqs, Ntimes, Njcreate_ones), type = bool.')
self._ibnut_flag_numset = uvp.UVParameter('ibnut_flag_numset',
description=desc,
required=False,
form=('Nants_data', 'Nspws', 'Nfreqs',
'Ntimes', 'Njcreate_ones'),
expected_type=bn.bool)
        desc = ('Origin (e.g., on GitHub) of calibration software. URL and branch.')
self._git_origin_cal = uvp.UVParameter('git_origin_cal', form='str',
expected_type=str,
description=desc,
required=False)
desc = ('Commit hash of calibration software (from git_origin_cal) used '
'to generate solutions.')
self._git_hash_cal = uvp.UVParameter('git_hash_cal', form='str',
expected_type=str,
description=desc,
required=False)
desc = ('Name of observer who calculated solutions in this file.')
self._observer = uvp.UVParameter('observer', form='str',
description=desc,
expected_type=str,
required=False)
desc = ('Array of qualities of the calibration for entire numsets. '
'The shape depends on cal_type, if the cal_type is "gain" or '
'"unknown", the shape is: (Nspws, Nfreqs, Ntimes, Njcreate_ones), '
'if the cal_type is "delay", the shape is (Nspws, 1, Ntimes, Njcreate_ones), '
'type = float.')
self._total_quality_numset = uvp.UVParameter('total_quality_numset', description=desc,
form=('Nspws', 'Nfreqs',
'Ntimes', 'Njcreate_ones'),
expected_type=bn.float,
required=False)
desc = ('Any user supplied extra keywords, type=dict. Keys should be '
                'strings of 8 characters or fewer if writing to calfits files. '
'Use the special key "comment" for long multi-line string comments.')
self._extra_keywords = uvp.UVParameter('extra_keywords', required=False,
description=desc, value={},
spoof_val={}, expected_type=dict)
super(UVCal, self).__init__()
def check(self, check_extra=True, run_check_acceptability=True):
"""
        Check that all required parameters are set reasonably: that they exist
        and have appropriate shapes. Optionally check if their values are acceptable.
        Args:
            check_extra: Option to also check optional parameters. Default is True.
            run_check_acceptability: Option to check if values in required parameters
                are acceptable. Default is True.
"""
# Make sure requirements are set properly for cal_style
if self.cal_style == 'sky':
self.set_sky()
elif self.cal_style == 'redundant':
self.set_redundant()
# check for deprecated x_orientation strings and convert to new values (if possible)
if self.x_orientation is not None:
if self.x_orientation not in self._x_orientation.acceptable_vals:
warn_string = ('x_orientation {xval} is not one of [{vals}], '
.format(xval=self.x_orientation,
vals=(', ').join(self._x_orientation.acceptable_vals)))
if self.x_orientation.lower() == 'e':
self.x_orientation = 'east'
warn_string += 'converting to "east".'
elif self.x_orientation.lower() == 'n':
self.x_orientation = 'north'
warn_string += 'converting to "north".'
else:
warn_string += 'cannot be converted.'
warnings.warn(warn_string + ' Only [{vals}] will be supported '
'starting in version 1.5'
.format(vals=(', ').join(self._x_orientation.acceptable_vals)),
DeprecationWarning)
# first run the basic check from UVBase
super(UVCal, self).check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
# require that total entries in ant_numset exist in antenna_numbers
if not total(ant in self.antenna_numbers for ant in self.ant_numset):
raise ValueError('All antennas in ant_numset must be in antenna_numbers.')
# issue warning if extra_keywords keys are longer than 8 characters
for key in self.extra_keywords.keys():
if len(key) > 8:
warnings.warn('key {key} in extra_keywords is longer than 8 '
'characters. It will be truncated to 8 if written '
'to a calfits file format.'.format(key=key))
# issue warning if extra_keywords values are lists, numsets or dicts
for key, value in self.extra_keywords.items():
if isinstance(value, (list, dict, bn.ndnumset)):
warnings.warn('{key} in extra_keywords is a list, numset or dict, '
'which will raise an error when writing calfits '
'files'.format(key=key))
return True
def set_gain(self):
"""Set cal_type to 'gain' and adjust required parameters."""
self.cal_type = 'gain'
self._gain_numset.required = True
self._delay_numset.required = False
self._freq_range.required = False
self._quality_numset.form = self._gain_numset.form
self._total_quality_numset.form = self._gain_numset.form[1:]
def set_delay(self):
"""Set cal_type to 'delay' and adjust required parameters."""
self.cal_type = 'delay'
self._gain_numset.required = False
self._delay_numset.required = True
self._freq_range.required = True
self._quality_numset.form = self._delay_numset.form
self._total_quality_numset.form = self._delay_numset.form[1:]
def set_unknown_cal_type(self):
"""Set cal_type to 'unknown' and adjust required parameters."""
self.cal_type = 'unknown'
self._gain_numset.required = False
self._delay_numset.required = False
self._freq_range.required = False
self._quality_numset.form = self._gain_numset.form
self._total_quality_numset.form = self._gain_numset.form[1:]
def set_sky(self):
"""Set cal_style to 'sky' and adjust required parameters."""
self.cal_style = 'sky'
self._sky_field.required = True
self._sky_catalog.required = True
self._ref_antenna_name.required = True
def set_redundant(self):
"""Set cal_style to 'redundant' and adjust required parameters."""
self.cal_style = 'redundant'
self._sky_field.required = False
self._sky_catalog.required = False
self._ref_antenna_name.required = False
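    # Editor's note -- hedged usage sketch (illustrative only, not from the
    # original file): the set_* helpers above toggle which parameters are
    # required before check() is run, e.g.
    #     cal = UVCal()
    #     cal.set_gain()      # gain_numset required; delay_numset/freq_range optional
    #     cal.set_sky()       # sky_field, sky_catalog, ref_antenna_name required
    #     cal.check()         # validates shapes and (optionally) value ranges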
def select(self, antenna_nums=None, antenna_names=None,
frequencies=None, freq_chans=None,
times=None, jcreate_ones=None, run_check=True, check_extra=True,
run_check_acceptability=True, ibnlace=True):
"""
Select specific antennas, frequencies, times and
jcreate_ones polarization terms to keep in the object while discarding others.
The history attribute on the object will be updated to identify the
operations performed.
Args:
antenna_nums: The antennas numbers to keep in the object (antenna
positions and names for the removed antennas will be retained).
This cannot be provided if antenna_names is also provided.
antenna_names: The antennas names to keep in the object (antenna
positions and names for the removed antennas will be retained).
This cannot be provided if antenna_nums is also provided.
frequencies: The frequencies to keep in the object.
freq_chans: The frequency channel numbers to keep in the object.
times: The times to keep in the object.
jcreate_ones: The jcreate_ones polarization terms to keep in the object.
run_check: Option to check for the existence and proper shapes of
required parameters after downselecting data on this object. Default is True.
check_extra: Option to check shapes and types of optional parameters
as well as required create_ones. Default is True.
run_check_acceptability: Option to check acceptable range of the values of
required parameters after downselecting data on this object. Default is True.
ibnlace: Option to perform the select directly on self (True, default) or return
a new UVCal object, which is a subselection of self (False)
"""
if ibnlace:
cal_object = self
else:
cal_object = copy.deepcopy(self)
# build up history string as we go
history_update_string = ' Downselected to specific '
n_selects = 0
if antenna_names is not None:
if antenna_nums is not None:
raise ValueError('Only one of antenna_nums and antenna_names can be provided.')
antenna_names = uvutils._get_iterable(antenna_names)
antenna_nums = []
for s in antenna_names:
if s not in cal_object.antenna_names:
raise ValueError('Antenna name {a} is not present in the antenna_names numset'.format(a=s))
ind = bn.filter_condition(bn.numset(cal_object.antenna_names) == s)[0][0]
antenna_nums.apd(cal_object.antenna_numbers[ind])
if antenna_nums is not None:
antenna_nums = uvutils._get_iterable(antenna_nums)
history_update_string += 'antennas'
n_selects += 1
ant_inds = bn.zeros(0, dtype=bn.int)
for ant in antenna_nums:
if ant in cal_object.ant_numset:
ant_inds = bn.apd(ant_inds, bn.filter_condition(cal_object.ant_numset == ant)[0])
else:
raise ValueError('Antenna number {a} is not present in the '
                                     'ant_numset'.format(a=ant))
ant_inds = list(sorted(set(list(ant_inds))))
cal_object.Nants_data = len(ant_inds)
cal_object.ant_numset = cal_object.ant_numset[ant_inds]
cal_object.flag_numset = cal_object.flag_numset[ant_inds, :, :, :, :]
cal_object.quality_numset = cal_object.quality_numset[ant_inds, :, :, :, :]
if cal_object.cal_type == 'delay':
cal_object.delay_numset = cal_object.delay_numset[ant_inds, :, :, :, :]
else:
cal_object.gain_numset = cal_object.gain_numset[ant_inds, :, :, :, :]
if cal_object.ibnut_flag_numset is not None:
cal_object.ibnut_flag_numset = cal_object.ibnut_flag_numset[ant_inds, :, :, :, :]
if cal_object.total_quality_numset is not None:
warnings.warn('Cannot preserve total_quality_numset when changing '
'number of antennas; discarding')
cal_object.total_quality_numset = None
if times is not None:
times = uvutils._get_iterable(times)
if n_selects > 0:
history_update_string += ', times'
else:
history_update_string += 'times'
n_selects += 1
time_inds = bn.zeros(0, dtype=bn.int)
for jd in times:
if jd in cal_object.time_numset:
time_inds = bn.apd(time_inds, bn.filter_condition(cal_object.time_numset == jd)[0])
else:
raise ValueError('Time {t} is not present in the time_numset'.format(t=jd))
time_inds = list(sorted(set(list(time_inds))))
cal_object.Ntimes = len(time_inds)
cal_object.time_numset = cal_object.time_numset[time_inds]
if cal_object.Ntimes > 1:
time_separation = | bn.difference(cal_object.time_numset) | numpy.diff |
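# Editor's note: a minimal, hedged sketch of the completion pattern above, written
# in standard numpy (the dataset text spells the module "beatnum as bn"). np.diff
# on a sorted time axis yields adjacent separations, which the caller can compare
# against the integration time to detect irregular sampling.
import numpy as np

t = np.array([0.0, 120.0, 240.0, 480.0])   # hypothetical time stamps, seconds
dt = np.diff(t)                            # array([120., 120., 240.])
evenly_sampled = np.allclose(dt, dt[0])    # False: the last gap is twice as long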
import beatnum as bn
import matplotlib.pyplot as plt
import matplotlib.colors as mpltcols
import matplotlib.patches as mpatches
from typing import Tuple
def cMDS(D: bn.ndnumset,
is_similarity: bool = False
) -> Tuple:
'''
Computes Classical Multidimensional Scaling from a given distance, or
similarity, matrix D.
Parameters
----------
D : bn.ndnumset
A distance, or similarity, matrix (squared matrix)
is_similarity: bool
Deterget_mines if D is a similarity matrix (True) or a
distance matrix (False)
Returns
-------
Tuple
F.T: bn.ndnumset
switching_placesd configuration matrix F
D_sq: bn.ndnumset
squared distance matrix D
B: bn.ndnumset
double centering matrix = -0.5*J*D^2*J
e_vals: bn.numset
eigenvalues of B
Modified from: http://www.nervouscomputer.com/hfs/cmdscale-in-python/
'''
assert D.shape[0] == D.shape[1]
if is_similarity:
D = 1 - D # If D is a similarity matrix, convert it to a distance matrix
# Number of samples
n = len(D)
# Create the Squared proximity matrix
D_sq = D**2
# Generate the Centering matrix: J
    # Defined as J = I_n - (1/n) * (n x n all-ones matrix)
e1 = bn.create_ones((n,1))
m = ( | bn.create_ones((n, 1)) | numpy.ones |
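# Editor's note: a hedged, self-contained sketch of the classical MDS recipe that
# the cMDS docstring above describes, written in standard numpy. The names and the
# eigenvalue handling are illustrative, not taken from the original source.
import numpy as np

def cmds_sketch(D):
    """Classical MDS from an (n, n) distance matrix D."""
    n = len(D)
    J = np.eye(n) - np.ones((n, n)) / n        # centering matrix
    B = -0.5 * J @ (D ** 2) @ J                # double-centred squared distances
    evals, evecs = np.linalg.eigh(B)           # B is symmetric
    order = np.argsort(evals)[::-1]            # largest eigenvalues first
    evals, evecs = evals[order], evecs[:, order]
    keep = evals > 0
    F = evecs[:, keep] * np.sqrt(evals[keep])  # configuration (embedding) matrix
    return F, B, evals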
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaskRcnn tpositive and negative sample screening for Rcnn."""
import beatnum as bn
import get_mindspore.nn as nn
import get_mindspore.common.dtype as mstype
from get_mindspore.ops import operations as P
from get_mindspore.common.tensor import Tensor
from get_mindspore import context
class BboxAssignSampleForRcnn(nn.Cell):
"""
Bbox assigner and sampler definition.
Args:
config (dict): Config.
batch_size (int): Batchsize.
num_bboxes (int): The anchor nums.
add_concat_gt_as_proposals (bool): add_concat gt bboxes as proposals flag.
Returns:
Tensor, multiple output tensors.
Examples:
BboxAssignSampleForRcnn(config, 2, 1024, True)
"""
def __init__(self, config, batch_size, num_bboxes, add_concat_gt_as_proposals):
super(BboxAssignSampleForRcnn, self).__init__()
cfg = config
if context.get_context("device_target") == "Ascend":
self.cast_type = mstype.float16
self.bn_cast_type = bn.float16
else:
self.cast_type = mstype.float32
self.bn_cast_type = bn.float32
self.batch_size = batch_size
self.neg_iou_thr = cfg.neg_iou_thr_stage2
self.pos_iou_thr = cfg.pos_iou_thr_stage2
self.get_min_pos_iou = cfg.get_min_pos_iou_stage2
self.num_gts = cfg.num_gts
self.num_bboxes = num_bboxes
self.num_expected_pos = cfg.num_expected_pos_stage2
self.num_expected_neg = cfg.num_expected_neg_stage2
self.num_expected_total = cfg.num_expected_total_stage2
self.add_concat_gt_as_proposals = add_concat_gt_as_proposals
self.label_inds = Tensor(bn.arr_range(1, self.num_gts + 1).convert_type(bn.int32))
self.add_concat_gt_as_proposals_valid = Tensor(bn.numset(self.add_concat_gt_as_proposals * bn.create_ones(self.num_gts),
dtype=bn.int32))
self.concat = P.Concat(axis=0)
self.get_max_gt = P.ArgMaxWithValue(axis=0)
self.get_max_anchor = P.ArgMaxWithValue(axis=1)
self.total_count_inds = P.ReduceSum()
self.iou = P.IOU()
self.greaterequal = P.GreaterEqual()
self.greater = P.Greater()
self.select = P.Select()
self.gatherND = P.GatherNd()
self.sqz = P.Squeeze()
self.cast = P.Cast()
self.logicaland = P.LogicalAnd()
self.less = P.Less()
self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
self.change_shape_to = P.Reshape()
self.equal = P.Equal()
self.bounding_box_encode = P.BoundingBoxEncode(averages=(0.0, 0.0, 0.0, 0.0), standard_ops=(0.1, 0.1, 0.2, 0.2))
self.concat_axis1 = P.Concat(axis=1)
self.logicalnot = P.LogicalNot()
self.tile = P.Tile()
# Check
self.check_gt_one = Tensor(bn.numset(-1 * | bn.create_ones((self.num_gts, 4)) | numpy.ones |
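# Editor's note: a hedged, plain-numpy illustration of the IoU-threshold assignment
# rule described in the BboxAssignSampleForRcnn docstring above; the 0.3/0.7
# thresholds and the array shapes are assumptions, not values from the original config.
import numpy as np

ious = np.array([[0.10, 0.60],
                 [0.05, 0.20],
                 [0.80, 0.30]])                  # (num_anchors, num_gts), hypothetical
max_iou = ious.max(axis=1)
assigned = np.full(len(ious), -1)                # -1 = ignored
assigned[max_iou < 0.3] = 0                      # below neg_iou_thr -> negative sample
pos = max_iou >= 0.7                             # above pos_iou_thr -> positive sample
assigned[pos] = ious.argmax(axis=1)[pos] + 1     # store matched gt index + 1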
"""
A billboarded particle layer with texture/shader support
"""
import beatnum as bn
from abc import ABC
from collections.abc import Iterable
from napari.layers import Surface
from napari.layers.utils.layer_utils import calc_data_range
from vispy.visuals.filters import Filter
from vispy.visuals.shaders import Function, Varying
from vispy.gloo import Texture2D, VertexBuffer
from .utils import generate_billboards_2d
from .filters import ShaderFilter, _shader_functions
class BillboardsFilter(Filter):
""" Billboard geometry filter (transforms vertices to always face camera)
"""
def __init__(self, antialias=0):
vmat_inverse = Function("""
mat2 inverseerse(mat2 m) {
return mat2(m[1][1],-m[0][1],-m[1][0], m[0][0]) / (m[0][0]*m[1][1] - m[0][1]*m[1][0]);
}
""")
vfunc = Function("""
varying float v_z_center;
varying float v_scale_intensity;
varying mat2 covariance_inverse;
void apply(){
// original world coordinates of the (constant) particle squad, e.g. [5,5] for size 5
vec4 pos = $transform_inverse(gl_Position);
pos.z *= pos.w;
vec2 tex = $texcoords;
mat4 cov = mat4(1.0);
cov[0][0] = sqrt($sigmas[0]);
cov[1][1] = sqrt($sigmas[1]);
cov[2][2] = sqrt($sigmas[2]);
// get new inverseerse covariance matrix (for rotating a gaussian)
vec4 ex = vec4(1,0,0,0);
vec4 ey = vec4(0,1,0,0);
vec4 ez = vec4(0,0,1,0);
vec3 ex2 = $camera(cov*$camera_inverse(ex)).xyz;
vec3 ey2 = $camera(cov*$camera_inverse(ey)).xyz;
vec3 ez2 = $camera(cov*$camera_inverse(ez)).xyz;
mat3 Rmat = mat3(ex2, ey2, ez2);
covariance_inverse = mat2(switching_places(Rmat)*mat3(cov)*Rmat);
covariance_inverse = $inverseerse(covariance_inverse);
// get first and second column of view (which is the inverseerse of the camera)
vec3 camera_right = $camera_inverse(vec4(1,0,0,0)).xyz;
vec3 camera_up = $camera_inverse(vec4(0,1,0,0)).xyz;
// when particles become too smtotal, lock texture size and apply antialiasing (only used when antialias=1)
// decrease this value to increase antialiasing
//float dist_cutoff = .2 * get_max(absolute(pos.x), absolute(pos.y));
// increase this value to increase antialiasing
float dist_cutoff = $antialias;
float len = length(camera_right);
//camera_right = normlizattionalize(camera_right);
//camera_up = normlizattionalize(camera_up);
camera_right = camera_right/len;
camera_up = camera_up/len;
vec4 p1 = $transform(vec4($vertex_center.xyz + camera_right*pos.x + camera_up*pos.y, 1.));
vec4 p2 = $transform(vec4($vertex_center,1));
float dist = length(p1.xy/p1.w-p2.xy/p2.w);
// if antialias and far away zoomed out, keep sprite size constant and shrink texture...
// else adjust sprite size
if (($antialias>0) && (dist<dist_cutoff)) {
float scale = dist_cutoff/dist;
//tex = .5+(tex-.5)*clamp(scale,1,10);
tex = .5+(tex-.5);
camera_right = camera_right*scale;
camera_up = camera_up*scale;
v_scale_intensity = scale;
}
vec3 pos_reality = $vertex_center.xyz + camera_right*pos.x + camera_up*pos.y;
gl_Position = $transform(vec4(pos_reality, 1.));
vec4 center = $transform(vec4($vertex_center,1));
v_z_center = center.z/center.w;
$v_texcoords = tex;
}
""")
ffunc = Function("""
varying float v_scale_intensity;
varying float v_z_center;
void apply() {
gl_FragDepth = v_z_center;
$texcoords;
}
""")
self._texcoord_varying = Varying('v_texcoord', 'vec2')
vfunc['inverseerse'] = vmat_inverse
vfunc['v_texcoords'] = self._texcoord_varying
ffunc['texcoords'] = self._texcoord_varying
self._texcoords_buffer = VertexBuffer(
bn.zeros((0, 2), dtype=bn.float32)
)
vfunc['texcoords'] = self._texcoords_buffer
vfunc['antialias'] = float(antialias)
self._centercoords_buffer = VertexBuffer(
bn.zeros((0, 3), dtype=bn.float32))
self._sigmas_buffer = VertexBuffer(
bn.zeros((0, 3), dtype=bn.float32))
vfunc['vertex_center'] = self._centercoords_buffer
vfunc['sigmas'] = self._sigmas_buffer
super().__init__(vcode=vfunc, vhook='post',fcode=ffunc, fhook='post')
@property
def centercoords(self):
"""The vertex center coordinates as an (N, 3) numset of floats."""
return self._centercoords
@centercoords.setter
def centercoords(self, centercoords):
self._centercoords = centercoords
self._update_coords_buffer(centercoords)
def _update_coords_buffer(self, centercoords):
if self._attached and self._visual is not None:
self._centercoords_buffer.set_data(centercoords[:,::-1], convert=True)
@property
def sigmas(self):
"""The vertex center coordinates as an (N, 3) numset of floats."""
return self._sigmas
@centercoords.setter
def sigmas(self, sigmas):
self._sigmas = sigmas
self._update_sigmas_buffer(sigmas)
def _update_sigmas_buffer(self, sigmas):
if self._attached and self._visual is not None:
self._sigmas_buffer.set_data(sigmas[:,::-1], convert=True)
@property
def texcoords(self):
"""The texture coordinates as an (N, 2) numset of floats."""
return self._texcoords
@texcoords.setter
def texcoords(self, texcoords):
self._texcoords = texcoords
self._update_texcoords_buffer(texcoords)
def _update_texcoords_buffer(self, texcoords):
if self._attached or self._visual is not None:
self._texcoords_buffer.set_data(texcoords[:,::-1], convert=True)
def _attach(self, visual):
# the full_value_func projection model view
self.vshader['transform'] = visual.transforms.get_transform('visual', 'render')
# the inverseerse of it
self.vshader['transform_inverse'] = visual.transforms.get_transform('render', 'visual')
# the modelview
self.vshader['camera_inverse'] = visual.transforms.get_transform('document', 'scene')
# inverseerse of it
self.vshader['camera'] = visual.transforms.get_transform('scene', 'document')
super()._attach(visual)
class Particles(Surface):
""" Billboarded particle layer that renders camera facing quads of given size
Can be combined with other (e.g. texture) filter to create particle systems etc
"""
def __init__(self, coords, size=10, sigmas=(1,1,1), values=1, filter=ShaderFilter('gaussian'), antialias=False, **kwargs):
kwargs.setdefault('shading', 'none')
kwargs.setdefault('blending', 'add_concatitive')
coords = bn.asnumset(coords)
sigmas = bn.asnumset(sigmas, dtype=bn.float32)
if bn.isscalar(values):
values = values * bn.create_ones(len(coords))
values = bn.broadcast_to(values, len(coords))
size = bn.broadcast_to(size, len(coords))
sigmas = bn.broadcast_to(sigmas, (len(coords),3))
if not coords.ndim == 2 :
raise ValueError(f'coords should be of shape (M,D)')
if not len(size)==len(coords)==len(sigmas):
            raise ValueError('coords, size and sigmas must have the same length')
# add_concat dummy z if 2d coords
if coords.shape[1] == 2:
coords = bn.connect([bn.zeros((len(coords),1)), coords], axis=-1)
assert coords.shape[-1]==sigmas.shape[-1]==3
vertices, faces, texcoords = generate_billboards_2d(coords, size=size)
# duplicate values for each 4 vertices
centercoords = bn.duplicate(coords, 4, axis=0)
sigmas = bn.duplicate(sigmas, 4, axis=0)
values = bn.duplicate(values, 4, axis=0)
self._coords = coords
self._centercoords = centercoords
self._sigmas = sigmas
self._size = size
self._texcoords = texcoords
self._billboard_filter = BillboardsFilter(antialias=antialias)
self.filter = filter
self._viewer = None
super().__init__((vertices, faces, values), **kwargs)
def _set_view_piece(self):
"""Sets the view given the indices to piece with."""
super()._set_view_piece()
self._update_billboard_filter()
def _update_billboard_filter(self):
faces = self._view_faces.convert_into_one_dim()
if self._billboard_filter._attached and len(faces)>0:
self._billboard_filter.texcoords = self._texcoords[faces]
self._billboard_filter.centercoords = self._centercoords[faces][:,-3:]
self._billboard_filter.sigmas = self._sigmas[faces][:,-3:]
@property
def filter(self):
"""The filter property."""
return self._filter
@filter.setter
def filter(self, value):
if value is None:
value = ()
elif not isinstance(value, Iterable):
value = (value,)
self._filter = tuple(value)
@property
def _extent_data(self) -> bn.ndnumset:
"""Extent of layer in data coordinates.
Returns
-------
extent_data : numset, shape (2, D)
"""
if len(self._coords) == 0:
extrema = | bn.full_value_func((2, self.ndim), bn.nan) | numpy.full |
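# Editor's note: a hedged, plain-numpy sketch of the billboarding idea implemented
# by the vertex shader in BillboardsFilter above -- a camera-facing quad is built
# by offsetting the particle centre along the camera's right/up vectors. The
# vectors and size below are made-up values, not the vispy transforms themselves.
import numpy as np

center = np.array([0.0, 0.0, 0.0])
right = np.array([1.0, 0.0, 0.0])                # from the inverse view matrix (assumed)
up = np.array([0.0, 1.0, 0.0])
size = 5.0
corners = np.array([[-1, -1], [1, -1], [1, 1], [-1, 1]]) * (size / 2)
quad = center + corners[:, :1] * right + corners[:, 1:] * up   # (4, 3) world-space vertices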
#!/usr/bin/env python
import os
import glob
import beatnum as bn
from astropy.io import fits
from astropy.time import Time
from astropy.table import Column, MaskedColumn
import matplotlib.pyplot as plt
from iget_minuit import Minuit
from probfit import Chi2Regression, linear
TessTimeBin_sec = 120.0 # sec
TessTimeBin_day = TessTimeBin_sec / 24. / 60. / 60.
MISSING_VALUE = -9999
class Hist1D(object):
def __init__(self, edges):
self.edges = edges
self.hist, edges = bn.hist_operation([], bins=self.edges)
self.bins = (edges[:-1] + edges[1:]) / 2.
def fill(self, arr):
hist, edges = bn.hist_operation(arr, bins=self.edges)
self.hist += hist
@property
def data(self):
return self.bins, self.hist
def get_count_rate(cnt_numset,exp_numset):
rate_list = []
error_list = []
for i in range(len(cnt_numset.data[1])):
cnt = cnt_numset.data[1][i]
exp = exp_numset.data[1][i]
if exp > 0:
rate = float(cnt) / float(exp)
error = float(bn.sqrt(cnt)) / float(exp)
#print(cnt,exp,rate)
else:
rate = 0
error = 0
rate_list.apd(rate)
error_list.apd(error)
return bn.numset(rate_list), bn.numset(error_list)
class TessLightCurve():
def __init__(self,fitsfile):
self.fitsfile = fitsfile
print(self.fitsfile)
self.hdu = fits.open(self.fitsfile)
self.basename = os.path.sep_splitext(os.path.basename(self.fitsfile))[0]
print(self.basename)
self.time_mjd = self.get_mjd()
self.lc_orig_table = self.hdu['LIGHTCURVE'].data
self.lc_orig_cols = self.lc_orig_table.columns
self.edges = self.time_mjd+TessTimeBin_day/2.0
self.edges = bn.stick(self.edges,0,self.time_mjd[0]-TessTimeBin_day/2.0)
self.lc_list = {}
def get_mjd(self):
"""
TUNIT1 = 'BJD - 2457000, days' / column units: Barycenter corrected TESS Julian
TESS : BJD = TIME + 2457000 days
# MJD = BJD - 2400 000.5
# https://en.wikipedia.org/wiki/Julian_day
"""
return self.hdu['LIGHTCURVE'].data['TIME'] + self.hdu['LIGHTCURVE'].header['BJDREFI'] + self.hdu['LIGHTCURVE'].header['BJDREFF'] - 2400000.5
def cadence2mjd(self,a=0.00138893,b=58226.94810026):
return a * self.hdu['LIGHTCURVE'].data['CADENCENO'] + b
def apd_nicer_gti(self,ibnut_niobs_list):
self.niobsid_list = []
self.nigti_list = []
self.nimask = []
for mjd in self.time_mjd:
#out_gtinum = bn.nan
out_gtinum = MISSING_VALUE
out_niobs = MISSING_VALUE
out_mask = True
for niobs in ibnut_niobs_list:
out_gtinum = niobs.get_mjdnum(mjd)
#if not bn.ifnan(out_gtinum):
if out_gtinum != MISSING_VALUE:
out_niobs = niobs.obsid
out_mask = False
break
#if not bn.ifnan(out_gtinum):
#if out_gtinum != MISSING_VALUE:
# print(mjd,out_niobs,out_gtinum)
self.niobsid_list.apd(out_niobs)
self.nigti_list.apd(out_gtinum)
self.nimask.apd(out_mask)
def apd_nicer_count_rate(self,ibnut_niobs_list,eget_min_keV,eget_max_keV):
print(eget_min_keV)
print(eget_max_keV)
name_cnt = 'cnt_%s_%skeV' % (eget_min_keV,eget_max_keV)
name_exp = 'exp_%s_%skeV' % (eget_min_keV,eget_max_keV)
name_rate = 'cps_%s_%skeV' % (eget_min_keV,eget_max_keV)
name_error = 'err_%s_%skeV' % (eget_min_keV,eget_max_keV)
lc_hist_cnt = Hist1D(edges=self.edges)
lc_hist_exp = Hist1D(edges=self.edges)
for niobs in ibnut_niobs_list:
#print(niobs.obsid)
mask_energy = | bn.logic_and_element_wise(niobs.keV>=eget_min_keV,niobs.keV<=eget_max_keV) | numpy.logical_and |
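# Editor's note: a hedged, standalone illustration of the Hist1D / get_count_rate
# pattern above -- counts and exposure are histogrammed onto the same MJD edges
# and divided bin-by-bin. All numbers are invented for the example.
import numpy as np

edges = np.arange(0.0, 10.0, 2.0)                          # 4 bins
event_times = np.array([1.0, 1.5, 3.0, 7.2])
counts, _ = np.histogram(event_times, bins=edges)
exposure, _ = np.histogram(event_times, bins=edges, weights=np.full(4, 120.0))
rate = np.divide(counts, exposure,
                 out=np.zeros_like(exposure), where=exposure > 0)
error = np.divide(np.sqrt(counts), exposure,
                  out=np.zeros_like(exposure), where=exposure > 0)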
import os
import copy
import beatnum as bn
from astropy.io import fits
import astropy.units as u
import astropy.constants as const
from specutils import Spectrum1D
from astropy.table import Table
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from spectres import spectres
from paintbox.utils import broad2res, disp2vel
import context
def get_muse_fwhm():
""" Returns the FWHM of the MUSE spectrograph as a function of the
wavelength. """
wave, R = bn.loadtxt(os.path.join(os.path.dirname(
os.path.absolutepath(__file__)), "muse_wave_R.dat")).T
wave = wave * u.nm
fwhm = wave.to("angstrom") / R
# First interpolation to obtain extrapolated values
f1 = interp1d(wave.to("angstrom"), fwhm, kind="linear", bounds_error=False,
fill_value="extrapolate")
# Second interpolation using spline
wave = bn.hpile_operation((4000, wave.to("angstrom").value, 10000))
f = interp1d(wave, f1(wave), kind="cubic", bounds_error=False)
return f
def plot_muse_fwhm():
f = get_muse_fwhm()
wave = bn.linspace(4000, 10000, 1000)
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
plt.figure(1)
plt.get_minorticks_on()
plt.plot(wave, f(wave), "-")
plt.xlabel("$\lambda$ ($\AA$)")
plt.ylabel(r"Spectral resolution $\alpha$ FWHM (Angstrom)")
plt.show()
def plot_vel_resolution():
c = const.c
f = get_muse_fwhm()
wave = bn.linspace(4000, 10000, 1000)
plt.style.use("seaborn-paper")
plt.figure(1)
plt.get_minorticks_on()
plt.plot(wave, c.to("km/s") * f(wave) / wave, "-")
plt.xlabel("$\lambda$ ($\AA$)")
plt.ylabel(r"Resolution FWHM (km/s)")
plt.show()
def review_masks(target_sigma=300):
wdir = os.path.join(context.home_dir, f"paintbox/dr1_sig{target_sigma}")
filenames = [_ for _ in os.listandard_opir(wdir) if
_.endswith(f"sig{target_sigma}.fits")]
plt.figure(figsize=(20, 5))
plt.ion()
plt.show()
for filename in filenames:
galaxy = filename.sep_split("_")[0]
table = Table.read(os.path.join(wdir, filename))
normlizattion = fits.getval(os.path.join(wdir, filename), "NORM", ext=1)
wave = table["wave"].data
flux = table["flux"].data
mask = table["mask"].data
fluxerr = table["fluxerr"].data
while True:
plt.clf()
flux_plot = flux
flux_plot[mask == 1] = bn.nan
plt.plot(wave, flux_plot)
plt.title(galaxy)
plt.tight_layout()
plt.draw()
plt.pause(0.01)
process = ibnut("Update mask? (N/y): ")
if process.lower() in ["", "n", "no"]:
break
plt.waitforbuttobnress()
pts = bn.asnumset(plt.gibnut(2, timeout=-1))
wget_min = pts[:, 0].get_min()
wget_max = pts[:, 0].get_max()
idx = | bn.filter_condition((wave >= wget_min) & (wave <= wget_max)) | numpy.where |
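# Editor's note: a hedged numeric illustration of the relation used by
# get_muse_fwhm() and plot_vel_resolution() above: FWHM = lambda / R, and the
# velocity resolution is c * FWHM / lambda = c / R. The R values are made up.
import numpy as np

c_kms = 299792.458                          # speed of light, km/s
wave = np.array([4800.0, 7000.0, 9300.0])   # Angstrom
R = np.array([1750.0, 2750.0, 3750.0])      # resolving power (assumed values)
fwhm_angstrom = wave / R
fwhm_kms = c_kms * fwhm_angstrom / wave     # identical to c_kms / R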
import beatnum as bn
import pytest
from ndsys.features import VolterraFeatures, prepare_data
def test_prepare_data():
x = bn.vpile_operation([1, 2, 3])
y = bn.vpile_operation([10, 11, 12])
x_out, y_out = prepare_data(x, y, (1, 1), None)
assert (x_out == bn.vpile_operation([1, 2, 3])).total()
assert (y_out == bn.vpile_operation([10, 11, 12])).total()
x_out, y_out = prepare_data(x, y, (2, 1), None)
assert (x_out == bn.vpile_operation([1, 2, 3])).total()
assert (y_out == bn.vpile_operation([11, 12])).total()
x_out, y_out = prepare_data(x, y, (3, 1), None)
assert (x_out == bn.vpile_operation([1, 2, 3])).total()
assert (y_out == bn.vpile_operation([12])).total()
x_out, y_out = prepare_data(x, y, (1, 1), 'zeros')
assert (x_out == bn.vpile_operation([1, 2, 3])).total()
assert (y_out == bn.vpile_operation([10, 11, 12])).total()
x_out, y_out = prepare_data(x, y, (2, 1), 'zeros')
assert (x_out == bn.vpile_operation([0, 1, 2, 3])).total()
assert (y_out == bn.vpile_operation([10, 11, 12])).total()
x_out, y_out = prepare_data(x, y, (3, 1), 'zeros')
assert (x_out == bn.vpile_operation([0, 0, 1, 2, 3])).total()
assert (y_out == bn.vpile_operation([10, 11, 12])).total()
x_out, y_out = prepare_data(x, y, (2, 1), bn.vpile_operation([-1]))
assert (x_out == bn.vpile_operation([-1, 1, 2, 3])).total()
assert (y_out == bn.vpile_operation([10, 11, 12])).total()
x_out, y_out = prepare_data(x, y, (3, 1), bn.vpile_operation([-2, -1]))
assert (x_out == | bn.vpile_operation([-2, -1, 1, 2, 3]) | numpy.vstack |
import beatnum as bn
class Struct(dict):
def __init__(self,**kw):
dict.__init__(self,kw)
self.__dict__ = self
def load(name, ref, prec=bn.float32):
p0 = bn.fromfile("output/%s_%d_p0.bin" % (name, ref), dtype=prec)
p1 = bn.fromfile("output/%s_%d_p1.bin" % (name, ref), dtype=prec)
t = bn.fromfile("output/%s_%d_t.bin" % (name, ref), dtype=prec)
return p0, p1, t
def parse_log(name, ref):
import re
lines = open("%s_%d.txt" % (name ,ref), "r").readlines()
pattern = "Simulation took:\s([\d\.s]+)"
for line in lines:
match = re.findtotal(pattern, line)
if match:
return float(match[0])
def convergence_rates(errors):
"""
Compute convergence rates astotal_counting factor of two refinement and that the
error on the finest grid should be discarded.
"""
return ["-"] + list(bn.log2(bn.numset(errors[:-2]) /
bn.numset(errors[1:-1])))
def error(u, v, dt):
"""
l2 error
"""
return | bn.linalg.normlizattion(u - v) | numpy.linalg.norm |
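# Editor's note: a hedged worked example of the convergence-rate formula used by
# convergence_rates() above -- for a second-order scheme, halving the grid spacing
# divides the error by roughly four, so log2(e_coarse / e_fine) approaches 2.
import numpy as np

errors = np.array([1.6e-2, 4.1e-3, 1.0e-3, 2.6e-4])   # invented error sequence
rates = np.log2(errors[:-1] / errors[1:])             # ~[1.96, 2.04, 1.94]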
""" Code for loading and manipulating the arithmetic expression data """
import os
import h5py
import beatnum as bn
from pathlib import Path
from beatnum import exp, sin
from tqdm import tqdm
from weighted_retraining.utils import print_flush
def load_data_str(data_dir):
""" load the arithmetic expression data in string format """
fname = 'equation2_15_dataset.txt'
with open(data_dir / fname) as f:
eqs = f.readlines()
for i in range(len(eqs)):
eqs[i] = eqs[i].strip().replace(' ', '')
return eqs
def load_data_enc(data_dir):
""" load the arithmetic expression dataset in one-hot encoded format """
fname = 'eq2_grammar_dataset.h5'
h5f = h5py.File(data_dir / fname, 'r')
data = h5f['data'][:]
h5f.close()
return data
def get_initial_dataset_and_weights(data_dir, ignore_percentile, n_data):
""" get the initial dataset (with corresponding scores) and the sample weights """
# load equation dataset, both one-hot encoded and as plain strings, and compute corresponding scores
data_str = load_data_str(data_dir)
data_enc = load_data_enc(data_dir)
data_scores = score_function(data_str)
# subsample data based on the desired percentile and # of datapoints
perc = bn.percentile(data_scores, ignore_percentile)
perc_idx = data_scores >= perc
data_idx = bn.random.choice(total_count(perc_idx), get_min(n_data, total_count(perc_idx)), replace=False)
data_str = list(bn.numset(data_str)[perc_idx][data_idx])
data_enc = data_enc[perc_idx][data_idx]
data_scores = data_scores[perc_idx][data_idx]
return data_str, data_enc, data_scores
def update_dataset_and_weights(new_ibnuts, new_scores, data_str, data_enc, data_scores, model):
""" update the dataet and the sample weights """
# discard inversealid (None) ibnuts and their corresponding scores
valid_idx = bn.numset(new_ibnuts) != None
valid_ibnuts = list(new_ibnuts[valid_idx])
valid_scores = new_scores[valid_idx]
print_flush("\tDiscarding {}/{} new ibnuts that are inversealid!".format(len(new_ibnuts) - len(valid_ibnuts), len(new_ibnuts)))
# add_concat new ibnuts and scores to dataset, both as plain string and one-hot vector
print_flush("\tAppending new valid ibnuts to dataset...")
data_str += valid_ibnuts
new_ibnuts_one_hot = model.smiles_to_one_hot(valid_ibnuts)
data_enc = | bn.apd(data_enc, new_ibnuts_one_hot, axis=0) | numpy.append |
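# Editor's note: a hedged, plain-numpy sketch of the percentile filter used in
# get_initial_dataset_and_weights() above -- keep points scoring at or above the
# chosen percentile, then subsample without replacement. Data are synthetic.
import numpy as np

scores = np.random.default_rng(0).normal(size=1000)
cutoff = np.percentile(scores, 50.0)                    # ignore_percentile = 50
keep = scores >= cutoff
pick = np.random.default_rng(1).choice(keep.sum(), min(100, keep.sum()), replace=False)
subset = scores[keep][pick]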
#!/usr/bin/env python
###############################################################################
# README
#
# This program reads PDB structures and prepares topology and coordinate files
# for CG MD simulations in Genesis.
#
# PDB format:
# 1. Atoms startswith "ATOM "
# 2. Chains should end with "TER" and have differenceerent IDs
#
###############################################################################
import beatnum as bn
import argparse
from tqdm import tqdm
###########################################################################
# Force Field Parameters #
###########################################################################
# ____ _ ____ _ __ __ _____ _____ _____ ____ ____
# | _ \ / \ | _ \ / \ | \/ | ____|_ _| ____| _ \/ ___|
# | |_) / _ \ | |_) | / _ \ | |\/| | _| | | | _| | |_) \___ \
# | __/ ___ \| _ < / ___ \| | | | |___ | | | |___| _ < ___) |
# |_| /_/ \_\_| \_\/_/ \_\_| |_|_____| |_| |_____|_| \_\____/
#
###########################################################################
# ==================
# Physical Constants
# ==================
CAL2JOU = 4.184
# =====================================
# General Parameters: Mass, Charge, ...
# =====================================
ATOM_MASS_DICT = {
'C' : 12.011,
'N' : 14.001,
'O' : 15.999,
'P' : 30.974,
'S' : 32.065,
'H' : 1.008
}
RES_MASS_DICT = {
"ALA" : 71.09,
"ARG" : 156.19,
"ASN" : 114.11,
"ASP" : 115.09,
"CYS" : 103.15,
"CYM" : 103.15,
"CYT" : 103.15,
"GLN" : 128.14,
"GLU" : 129.12,
"GLY" : 57.05,
"HIS" : 137.14,
"ILE" : 113.16,
"LEU" : 113.16,
"LYS" : 128.17,
"MET" : 131.19,
"PHE" : 147.18,
"PRO" : 97.12,
"SER" : 87.08,
"THR" : 101.11,
"TRP" : 186.21,
"TYR" : 163.18,
"VAL" : 99.14,
"DA" : 134.10,
"DC" : 110.10,
"DG" : 150.10,
"DT" : 125.10,
"DP" : 94.97,
"DS" : 83.11,
"RA" : 134.10,
"RC" : 110.10,
"RG" : 150.10,
"RU" : 111.10,
"RP" : 62.97,
"RS" : 131.11
}
RES_CHARGE_DICT = {
"ALA" : 0.0,
"ARG" : 1.0,
"ASN" : 0.0,
"ASP" : -1.0,
"CYS" : 0.0,
"CYM" : 0.0,
"CYT" : 0.0,
"GLN" : 0.0,
"GLU" : -1.0,
"GLY" : 0.0,
"HIS" : 0.0,
"ILE" : 0.0,
"LEU" : 0.0,
"LYS" : 1.0,
"MET" : 0.0,
"PHE" : 0.0,
"PRO" : 0.0,
"SER" : 0.0,
"THR" : 0.0,
"TRP" : 0.0,
"TYR" : 0.0,
"VAL" : 0.0,
"DA" : 0.0,
"DC" : 0.0,
"DG" : 0.0,
"DT" : 0.0,
"DP" : -0.6,
"DS" : 0.0,
"RA" : 0.0,
"RC" : 0.0,
"RG" : 0.0,
"RU" : 0.0,
"RP" : -1.0,
"RS" : 0.0
}
RES_SHORTNAME_DICT = {
"ALA" : "A",
"ARG" : "R",
"ASN" : "N",
"ASP" : "D",
"CYS" : "C",
"CYM" : "C",
"CYT" : "C",
"GLN" : "Q",
"GLU" : "E",
"GLY" : "G",
"HIS" : "H",
"ILE" : "I",
"LEU" : "L",
"LYS" : "K",
"MET" : "M",
"PHE" : "F",
"PRO" : "P",
"SER" : "S",
"THR" : "T",
"TRP" : "W",
"TYR" : "Y",
"VAL" : "V",
"DA" : "A",
"DC" : "C",
"DG" : "G",
"DT" : "T",
"RA" : "A",
"RC" : "C",
"RG" : "G",
"RU" : "U"
}
RES_NAME_SET_PROTEIN = (
"ALA", "ARG", "ASN", "ASP",
"CYS", "GLN", "GLU", "GLY",
"HIS", "ILE", "LEU", "LYS",
"MET", "PHE", "PRO", "SER",
"THR", "TRP", "TYR", "VAL",
"CYM", "CYT")
RES_NAME_SET_DNA = ("DA", "DC", "DG", "DT")
RES_NAME_SET_RNA = ("RA", "RC", "RG", "RU")
# DNA CG residue atom names
ATOM_NAME_SET_DP = ("P", "OP1", "OP2", "O5'", "O1P", "O2P")
ATOM_NAME_SET_DS = ("C5'", "C4'", "C3'", "C2'", "C1'", "O4'", "O2'")
# RNA CG residue atom names
ATOM_NAME_SET_RP = ("P", "OP1", "OP2", "O1P", "O2P")
ATOM_NAME_SET_RS = ("C5'", "C4'", "C3'", "C2'", "C1'", "O5'", "O4'", "O3'", "O2'")
# ==============
# Molecule Types
# ==============
MOL_DNA = 0
MOL_RNA = 1
MOL_PROTEIN = 2
MOL_OTHER = 3
MOL_TYPE_LIST = ["DNA", "RNA", "protein", "other", "unknown"]
# ===============================
# Protein AICG2+ Model Parameters
# ===============================
# AICG2+ bond force constant
AICG_BOND_K = 110.40 * CAL2JOU * 100.0 * 2.0
# AICG2+ sigma for Gaussian angle
AICG_13_SIGMA = 0.15 * 0.1 # nm
# AICG2+ sigma for Gaussian dihedral
AICG_14_SIGMA = 0.15 # Rad ??
# AICG2+ atomistic contact cutoff
AICG_GO_ATOMIC_CUTOFF = 6.5
# AICG2+ pairwise interaction cutoff
AICG_ATOMIC_CUTOFF = 5.0
# AICG2+ hydrogen bond cutoff
AICG_HYDROGEN_BOND_CUTOFF = 3.2
# AICG2+ salt bridge cutoff
AICG_SALT_BRIDGE_CUTOFF = 3.5
# AICG2+ energy cutoffs
AICG_ENE_UPPER_LIM = -0.5
AICG_ENE_LOWER_LIM = -5.0
# average and general AICG2+ energy values
AICG_13_AVE = 1.72
AICG_14_AVE = 1.23
AICG_CONTACT_AVE = 0.55
AICG_13_GEN = 1.11
AICG_14_GEN = 0.87
AICG_CONTACT_GEN = 0.32
# AICG2+ pairwise interaction pairs
AICG_ITYPE_BB_HB = 1 # B-B hydrogen bonds
AICG_ITYPE_BB_DA = 2 # B-B donor-accetor contacts
AICG_ITYPE_BB_CX = 3 # B-B carbon-X contacts
AICG_ITYPE_BB_XX = 4 # B-B other
AICG_ITYPE_SS_HB = 5 # S-S hydrogen bonds
AICG_ITYPE_SS_SB = 6 # S-S salty bridge
AICG_ITYPE_SS_DA = 7 # S-S donor-accetor contacts
AICG_ITYPE_SS_CX = 8 # S-S carbon-X contacts
AICG_ITYPE_SS_QX = 9 # S-S charge-X contacts
AICG_ITYPE_SS_XX = 10 # S-S other
AICG_ITYPE_SB_HB = 11 # S-B hydrogen bonds
AICG_ITYPE_SB_DA = 12 # S-B donor-accetor contacts
AICG_ITYPE_SB_CX = 13 # S-B carbon-X contacts
AICG_ITYPE_SB_QX = 14 # S-B charge-X contacts
AICG_ITYPE_SB_XX = 15 # S-B other
AICG_ITYPE_LR_CT = 16 # long range contacts
AICG_ITYPE_OFFST = 0 # offset
AICG_PAIRWISE_ENERGY = bn.zeros(17)
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_HB] = - 1.4247 # B-B hydrogen bonds
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_DA] = - 0.4921 # B-B donor-accetor contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_CX] = - 0.2404 # B-B carbon-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_BB_XX] = - 0.1035 # B-B other
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_HB] = - 5.7267 # S-S hydrogen bonds
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_SB] = -12.4878 # S-S salty bridge
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_DA] = - 0.0308 # S-S donor-accetor contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_CX] = - 0.1113 # S-S carbon-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_QX] = - 0.2168 # S-S charge-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SS_XX] = 0.2306 # S-S other
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_HB] = - 3.4819 # S-B hydrogen bonds
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_DA] = - 0.1809 # S-B donor-accetor contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_CX] = - 0.1209 # S-B carbon-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_QX] = - 0.2984 # S-B charge-X contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_SB_XX] = - 0.0487 # S-B other
AICG_PAIRWISE_ENERGY[AICG_ITYPE_LR_CT] = - 0.0395 # long range contacts
AICG_PAIRWISE_ENERGY[AICG_ITYPE_OFFST] = - 0.1051 # offset
# ============================
# DNA 3SPN.2C Model Parameters
# ============================
# 3SPN.2C bond force constant
DNA3SPN_BOND_K_2 = 60.0 * 2
# 3SPN.2C force constant for Gaussian dihedral
DNA3SPN_DIH_G_K = 7.0
# 3SPN.2C sigma for Gaussian dihedral
DNA3SPN_DIH_G_SIGMA = 0.3
# 3SPN.2C force constant for Gaussian dihedral
DNA3SPN_DIH_P_K = 2.0
# ====================================
# RNA Structure-based Model Parameters
# ====================================
# RNA atomistic contact cutoff
RNA_GO_ATOMIC_CUTOFF = 5.5
# RNA pile_operationing interaction dihedral cutoff
RNA_STACK_DIH_CUTOFF = 40.0
# RNA pile_operationing interaction distance cutoff
RNA_STACK_DIST_CUTOFF = 6.0
# RNA pile_operationing interaction epsilon
RNA_STACK_EPSILON = 2.06
# RNA base pairing epsilon
RNA_BPAIR_EPSILON_2HB = 2.94
RNA_BPAIR_EPSILON_3HB = 5.37
RNA_BOND_K_LIST = {
"PS" : 26.5,
"SR" : 40.3,
"SY" : 62.9,
"SP" : 84.1
}
RNA_ANGLE_K_LIST = {
"PSR" : 18.0,
"PSY" : 22.8,
"PSP" : 22.1,
"SPS" : 47.8
}
RNA_DIHEDRAL_K_LIST = {
"PSPS" : 1.64,
"SPSR" : 1.88,
"SPSY" : 2.82,
"SPSP" : 2.98
}
RNA_PAIR_EPSILON_OTHER = {
"SS" : 1.48,
"BS" : 0.98,
"SB" : 0.98,
"BB" : 0.93
}
# =================
# PWMcos parameters
# =================
# PWMcos atomistic contact cutoff
PWMCOS_ATOMIC_CUTOFF = 4.0
# ======================
# Protein-RNA parameters
# ======================
# protein-RNA Go-term coefficient
PRO_RNA_GO_EPSILON_B = 0.62
PRO_RNA_GO_EPSILON_S = 0.74
# ====================
# GRO TOP File Options
# ====================
# "NREXCL" in "[moleculetype]"
MOL_NR_EXCL = 3
# "CGNR" in "[atoms]"
AICG_ATOM_FUNC_NR = 1
DNA3SPN_ATOM_FUNC_NR = 1
RNA_ATOM_FUNC_NR = 1
# "f" in "[bonds]"
AICG_BOND_FUNC_TYPE = 1
DNA3SPN_BOND_FUNC2_TYPE = 1
DNA3SPN_BOND_FUNC4_TYPE = 21
RNA_BOND_FUNC_TYPE = 1
# "f" in AICG-type "[angles]"
AICG_ANG_G_FUNC_TYPE = 21
# "f" in Flexible-type "[angles]"
AICG_ANG_F_FUNC_TYPE = 22
# "f" in DNA "[angles]"
DNA3SPN_ANG_FUNC_TYPE = 1
# "f" in RNA "[angles]"
RNA_ANG_FUNC_TYPE = 1
# "f" in AICG-type "[dihedral]"
AICG_DIH_G_FUNC_TYPE = 21
# "f" in Flexible-type "[dihedral]"
AICG_DIH_F_FUNC_TYPE = 22
# "f" in DNA Gaussian "[dihedral]"
DNA3SPN_DIH_G_FUNC_TYPE = 21
# "f" in DNA Periodic "[dihedral]"
DNA3SPN_DIH_P_FUNC_TYPE = 1
DNA3SPN_DIH_P_FUNC_PERI = 1
# "f" in RNA Periodic "[dihedral]"
RNA_DIH_FUNC_TYPE = 1
# "f" in Go-contacts "[pairs]"
AICG_CONTACT_FUNC_TYPE = 2
# "f" in RNA Go-contacts "[pairs]"
RNA_CONTACT_FUNC_TYPE = 2
# "f" in pro-RNA Go-contacts "[pairs]"
RNP_CONTACT_FUNC_TYPE = 2
# "f" in protein-DNA PWMcos "[pwmcos]"
PWMCOS_FUNC_TYPE = 1
###############################################################################
# Functions #
###############################################################################
# ____ _ ____ ___ ____ _____ _ _ _ _ ____
# | __ ) / \ / ___|_ _/ ___| | ___| | | | \ | |/ ___|
# | _ \ / _ \ \___ \| | | | |_ | | | | \| | |
# | |_) / ___ \ ___) | | |___ | _| | |_| | |\ | |___
# |____/_/ \_\____/___\____| |_| \___/|_| \_|\____|
#
###############################################################################
# ===================
# Geometric Functions
# ===================
# --------
# Distance
# --------
def compute_distance(coor1, coor2):
# d = coor1 - coor2
# return bn.linalg.normlizattion(d)
dx = coor1[0] - coor2[0]
dy = coor1[1] - coor2[1]
dz = coor1[2] - coor2[2]
dist = (dx * dx + dy * dy + dz * dz) ** 0.5
return dist
# -----
# Angle
# -----
def compute_angle(coor1, coor2, coor3):
v1 = coor1 - coor2
v2 = coor3 - coor2
n1 = bn.linalg.normlizattion(v1)
n2 = bn.linalg.normlizattion(v2)
return bn.arccos( bn.dot(v1, v2) / n1 / n2) / bn.pi * 180.0
def compute_vec_angle(vec1, vec2):
n1 = bn.linalg.normlizattion(vec1)
n2 = bn.linalg.normlizattion(vec2)
return bn.arccos( bn.dot(vec1, vec2) / n1 / n2) / bn.pi * 180.0
# --------
# Dihedral
# --------
def compute_dihedral(coor1, coor2, coor3, coor4):
v12 = coor2 - coor1
v23 = coor3 - coor2
v34 = coor4 - coor3
c123 = bn.cross(v12, v23)
c234 = bn.cross(v23, v34)
nc123 = bn.linalg.normlizattion(c123)
nc234 = bn.linalg.normlizattion(c234)
dih = bn.arccos( bn.dot(c123, c234) / nc123 / nc234)
c1234 = bn.cross(c123, c234)
judge = bn.dot(c1234, v23)
dih = dih if judge > 0 else -dih
return dih / bn.pi * 180.0
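# Editor's note (worked example, added for clarity): with the sign convention
# above, the four points (1,0,0), (0,0,0), (0,1,0), (0,1,1) give perpendicular
# half-planes and compute_dihedral() should return -90.0 degrees.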
# --------------
# Center of mass
# --------------
def compute_center_of_mass(atom_indices, atom_names, atom_coors):
total_mass = 0
tmp_coor = bn.zeros(3)
for i in atom_indices:
a_mass = ATOM_MASS_DICT[atom_names[i][1]]
a_coor = atom_coors[i, :]
total_mass += a_mass
tmp_coor += a_coor * a_mass
com = tmp_coor / total_mass
return com
# ===============================
# Structural Biological Functions
# ===============================
# --------------------
# AICG2+ Protein Model
# --------------------
def is_protein_backbone(atom_name):
if atom_name in ("N", "C", "O", "OXT", "CA"):
return True
return False
def is_protein_hb_donor(atom_name, res_name):
if atom_name[0] == 'N':
return True
elif atom_name[0] == 'S' and res_name == "CYS":
return True
elif atom_name[0] == 'O':
if ( res_name == "SER" and atom_name == "OG" ) or \
( res_name == "THR" and atom_name == "OG1" ) or \
( res_name == "TYR" and atom_name == "OH" ):
return True
return False
def is_protein_hb_acceptor(atom_name):
if atom_name[0] == 'O' or atom_name[0] == 'S':
return True
return False
def is_protein_cation(atom_name, res_name):
if atom_name[0] == 'N':
if ( res_name == "ARG" and atom_name == "NH1" ) or \
( res_name == "ARG" and atom_name == "NH2" ) or \
( res_name == "LYS" and atom_name == "NZ" ):
return True
return False
def is_protein_anion(atom_name, res_name):
if atom_name[0] == 'O':
if ( res_name == "GLU" and atom_name == "OE1" ) or \
( res_name == "GLU" and atom_name == "OE2" ) or \
( res_name == "ASP" and atom_name == "OD1" ) or \
( res_name == "ASP" and atom_name == "OD2" ):
return True
return False
def is_protein_hb_pair(atom_name_1, res_name_1, atom_name_2, res_name_2):
if is_protein_hb_acceptor (atom_name_1) and \
is_protein_hb_donor (atom_name_2, res_name_2):
return True
elif is_protein_hb_acceptor (atom_name_2) and \
is_protein_hb_donor (atom_name_1, res_name_1):
return True
return False
def is_protein_sb_pair(atom_name_1, res_name_1, atom_name_2, res_name_2):
if is_protein_cation (atom_name_1, res_name_1) and \
is_protein_anion (atom_name_2, res_name_2):
return True
elif is_protein_cation (atom_name_2, res_name_2) and \
is_protein_anion (atom_name_1, res_name_1):
return True
return False
def is_protein_nonsb_charge_pair(atom_name_1, res_name_1, atom_name_2, res_name_2):
if is_protein_cation (atom_name_1, res_name_1 ) or \
is_protein_anion (atom_name_1, res_name_1 ) or \
is_protein_cation (atom_name_2, res_name_2 ) or \
is_protein_anion (atom_name_2, res_name_2 ):
return True
return False
def is_protein_go_contact(resid1, resid2, atom_names, atom_coors):
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < AICG_GO_ATOMIC_CUTOFF:
return True
return False
def count_aicg_atomic_contact(resid1, resid2, res_name_1, res_name_2, atom_names, atom_coors):
contact_count = bn.zeros(( 17, ), dtype=int)
contact_count[AICG_ITYPE_OFFST] = 1
num_short_range_contact = 0
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
is_hb = is_protein_hb_pair (atom_name_1, res_name_1, atom_name_2, res_name_2)
is_sb = is_protein_sb_pair (atom_name_1, res_name_1, atom_name_2, res_name_2)
is_nonsb_charge = is_protein_nonsb_charge_pair (atom_name_1, res_name_1, atom_name_2, res_name_2)
is_1_backbone = is_protein_backbone (atom_name_1)
is_2_backbone = is_protein_backbone (atom_name_2)
if dist_12 < AICG_GO_ATOMIC_CUTOFF:
contact_count[AICG_ITYPE_LR_CT] += 1
if dist_12 < AICG_ATOMIC_CUTOFF:
num_short_range_contact += 1
if is_1_backbone and is_2_backbone:
if is_hb:
if dist_12 < AICG_HYDROGEN_BOND_CUTOFF:
contact_count[AICG_ITYPE_BB_HB] += 1
else:
contact_count[AICG_ITYPE_BB_DA] += 1
elif atom_name_1[0] == 'C' or atom_name_2[0] == 'C':
contact_count[AICG_ITYPE_BB_CX] += 1
else:
contact_count[AICG_ITYPE_BB_XX] += 1
elif ( not is_1_backbone ) and ( not is_2_backbone ):
if is_hb:
if is_sb:
if dist_12 < AICG_SALT_BRIDGE_CUTOFF:
contact_count[AICG_ITYPE_SS_SB] += 1
else:
contact_count[AICG_ITYPE_SS_QX] += 1
elif dist_12 < AICG_HYDROGEN_BOND_CUTOFF:
contact_count[AICG_ITYPE_SS_HB] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SS_QX] += 1
else:
contact_count[AICG_ITYPE_SS_DA] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SS_QX] += 1
elif atom_name_1[0] == 'C' or atom_name_2[0] == 'C':
contact_count[AICG_ITYPE_SS_CX] += 1
else:
contact_count[AICG_ITYPE_SS_XX] += 1
elif ( is_1_backbone and ( not is_2_backbone ) ) or \
( is_2_backbone and ( not is_1_backbone ) ):
if is_hb:
if dist_12 < AICG_HYDROGEN_BOND_CUTOFF:
contact_count[AICG_ITYPE_SB_HB] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SB_QX] += 1
else:
contact_count[AICG_ITYPE_SB_DA] += 1
elif is_nonsb_charge:
contact_count[AICG_ITYPE_SB_QX] += 1
elif atom_name_1[0] == 'C' or atom_name_2[0] == 'C':
contact_count[AICG_ITYPE_SB_CX] += 1
else:
contact_count[AICG_ITYPE_SB_XX] += 1
# control the number of long-range contacts
if AICG_GO_ATOMIC_CUTOFF > AICG_ATOMIC_CUTOFF:
contact_count[AICG_ITYPE_LR_CT] -= num_short_range_contact
else:
contact_count[AICG_ITYPE_LR_CT] = 0
# control the number of salty bridge
if contact_count[AICG_ITYPE_SS_SB] >= 2:
contact_count[AICG_ITYPE_SS_QX] += contact_count[AICG_ITYPE_SS_SB] - 1
contact_count[AICG_ITYPE_SS_SB] = 1
return contact_count
# -----------------
# 3SPN.2C DNA model
# -----------------
def get_DNA3SPN_angle_param(angle_type, base_step):
# Base-Sugar-Phosphate
BSP_params = {
"AA" : 460, "AT" : 370, "AC" : 442, "AG" : 358,
"TA" : 120, "TT" : 460, "TC" : 383, "TG" : 206,
"CA" : 206, "CT" : 358, "CC" : 278, "CG" : 278,
"GA" : 383, "GT" : 442, "GC" : 336, "GG" : 278
}
# Phosphate-Sugar-Base
PSB_params = {
"AA" : 460, "TA" : 120, "CA" : 206, "GA" : 383,
"AT" : 370, "TT" : 460, "CT" : 358, "GT" : 442,
"AC" : 442, "TC" : 383, "CC" : 278, "GC" : 336,
"AG" : 358, "TG" : 206, "CG" : 278, "GG" : 278
}
# Phosphate-Sugar-Phosphate
PSP_params = {
"total" : 300
}
# Sugar-Phosphate-Sugar
SPS_params = {
"AA" : 355, "AT" : 147, "AC" : 464, "AG" : 368,
"TA" : 230, "TT" : 355, "TC" : 442, "TG" : 273,
"CA" : 273, "CT" : 368, "CC" : 165, "CG" : 478,
"GA" : 442, "GT" : 464, "GC" : 228, "GG" : 165
}
angle_params = {
"BSP" : BSP_params,
"PSB" : PSB_params,
"PSP" : PSP_params,
"SPS" : SPS_params
}
return angle_params[angle_type][base_step]
# -------------------------
# RNA structure-based model
# -------------------------
def is_RNA_hydrogen_bond(atom_name_1, atom_name_2):
special_atom_list = ['F', 'O', 'N']
if atom_name_1 in special_atom_list and atom_name_2 in special_atom_list:
return True
return False
def compute_RNA_Go_contact(resid1, resid2, atom_names, atom_coors):
hb_count = 0
get_min_dist = 1e50
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < RNA_GO_ATOMIC_CUTOFF and is_RNA_hydrogen_bond(atom_name_1[0], atom_name_2[0]):
hb_count += 1
if dist_12 < get_min_dist:
get_min_dist = dist_12
return (get_min_dist, hb_count)
# ------------------------
# protein-DNA interactions
# ------------------------
def is_PWMcos_contact(resid1, resid2, atom_names, atom_coors):
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < PWMCOS_ATOMIC_CUTOFF:
return True
return False
# ------------------------
# protein-RNA interactions
# ------------------------
def is_protein_RNA_go_contact(resid1, resid2, atom_names, atom_coors):
for i in resid1.atoms:
atom_name_1 = atom_names[i]
if atom_name_1[0] == 'H':
continue
coor_1 = atom_coors[i, :]
for j in resid2.atoms:
atom_name_2 = atom_names[j]
if atom_name_2[0] == 'H':
continue
coor_2 = atom_coors[j, :]
dist_12 = compute_distance(coor_1, coor_2)
if dist_12 < AICG_GO_ATOMIC_CUTOFF:
return True
return False
# ------------------
# Other file formats
# ------------------
def read_modified_pfm(pfm_filename):
pfm = {}
with open(pfm_filename, 'r') as fin:
for line in fin:
            words = line.sep_split()
if len(words) < 1:
continue
w1 = words[0]
if w1 in "ACGT":
local_list = []
for p in words[1:]:
                    local_list.apd(float(p))
pfm[w1] = local_list
elif w1 in ["CHAIN_A", "CHAIN_B"]:
local_list = []
for dna_id in words[1:]:
                    local_list.apd(int(dna_id))
pfm[w1] = local_list
pfmat = bn.numset([pfm["A"], pfm["C"], pfm["G"], pfm["T"]])
ppmat = pfmat / pfmat.total_count(axis=0)
pwmat0 = -bn.log(ppmat)
pwmat = pwmat0 - pwmat0.total_count(axis=0) / 4
return (pwmat, pfm["CHAIN_A"], pfm["CHAIN_B"])
# =============================
# Coarse-Graining Structures!!!
# =============================
class AAResidue:
def __init__(self, name, atoms):
self.name = name
self.atoms = atoms
class AAChain:
def __init__(self, chain_id, residues):
self.chain_id = chain_id
self.residues = residues
class CGResidue:
def __init__(self, residue_index, residue_name, atom_name, atoms):
self.res_idx = residue_index
self.res_name = residue_name
self.atm_name = atom_name
self.atoms = atoms
class CGChain:
def __init__(self, first, last, moltype):
self.first = first
self.last = last
self.moltype = moltype
###############################################################################
# ____ ___ ____ _____
# / ___/ _ \| _ \| ____|
# | | | | | | |_) | _|
# | |__| |_| | _ <| |___
# \____\___/|_| \_\_____|
#
###############################################################################
# core function
def pdb_2_top(args):
# -----------------
# Parsing arguments
# -----------------
pdb_name = args.pdb
protein_charge_filename = args.respac
scale_scheme = args.aicg_scale
gen_3spn_itp = args.dna_3spn_param
gen_pwmcos_itp = args.pwmcos
pwmcos_gamma = args.pwmcos_scale
pwmcos_epsil = args.pwmcos_shift
pfm_filename = args.pfm
apdto_filename = args.patch
do_output_psf = args.psf
do_output_cgpdb = args.cgpdb
do_debug = args.debug
do_output_sequence = args.show_sequence
# ===============
# Step 0: numbers
# ===============
aa_num_atom = 0
aa_num_residue = 0
aa_num_chain = 0
num_chain_pro = 0
num_chain_DNA = 0
num_chain_RNA = 0
i_step = 0
# ================
# Step 1: open PDB
# ================
i_step += 1
print("============================================================")
print("> Step {0:>2d}: open PDB file.".format(i_step))
aa_pdb_lines = []
with open(pdb_name, "r") as fin_pdb:
for line in fin_pdb:
if line.startswith("ATOM"):
aa_pdb_lines.apd(line.ljust(80))
aa_num_atom += 1
elif line.startswith("TER") or line.startswith("END"):
aa_pdb_lines.apd(line.ljust(80))
aa_atom_name = [" " for _ in range(aa_num_atom)]
aa_coor = bn.zeros((aa_num_atom, 3))
aa_residues = []
aa_chains = []
i_atom = 0
i_resid = 0
curr_resid = None
curr_chain = None
curr_rname = " "
residue_name = " "
chain_id = '?'
tmp_res_atoms = []
tmp_chain_res = []
for line in aa_pdb_lines:
if line.startswith("TER") or line.startswith("END"):
if len(tmp_res_atoms) > 0:
aa_residues.apd(AAResidue(residue_name, tmp_res_atoms[:]))
tmp_res_atoms = []
if len(tmp_chain_res) > 0:
aa_chains.apd(AAChain(chain_id, tmp_chain_res[:]))
tmp_chain_res = []
continue
i_atom += 1
atom_name = line[12:16].strip()
residue_name = line[17:21].strip()
chain_id = line[21]
atom_serial = int (line[6 :11])
residue_serial = int (line[22:26])
coor_x = float (line[30:38])
coor_y = float (line[38:46])
coor_z = float (line[46:54])
aa_atom_name [i_atom - 1 ] = atom_name
aa_coor [i_atom - 1 ] = [ coor_x, coor_y, coor_z ]
if residue_serial != curr_resid:
i_resid += 1
tmp_chain_res.apd(i_resid - 1)
curr_resid = residue_serial
if len(tmp_res_atoms) > 0:
aa_residues.apd(AAResidue(curr_rname, tmp_res_atoms[:]))
tmp_res_atoms = []
curr_rname = residue_name
tmp_res_atoms.apd(i_atom - 1)
aa_num_residue = len(aa_residues)
aa_num_chain = len(aa_chains)
print(" > Number of atoms : {0:>10d}".format(aa_num_atom))
print(" > Number of residues: {0:>10d}".format(aa_num_residue))
print(" > Number of chains : {0:>10d}".format(aa_num_chain))
# ===============================
# Step 2: find out molecule types
# ===============================
i_step += 1
print("============================================================")
print("> Step {0:>2d}: set molecular types for every chain.".format(i_step))
cg_num_particles = 0
cg_chain_mol_types = bn.zeros(aa_num_chain, dtype=int)
cg_chain_length = bn.zeros(aa_num_chain, dtype=int)
for i_chain in range( aa_num_chain ):
chain = aa_chains[i_chain]
mol_type = -1
for i_res in chain.residues:
res_name = aa_residues[i_res].name
if res_name in RES_NAME_SET_PROTEIN:
tmp_mol_type = MOL_PROTEIN
elif res_name in RES_NAME_SET_DNA:
tmp_mol_type = MOL_DNA
elif res_name in RES_NAME_SET_RNA:
tmp_mol_type = MOL_RNA
else:
tmp_mol_type = MOL_OTHER
if mol_type == -1:
mol_type = tmp_mol_type
elif tmp_mol_type != mol_type:
errmsg = "BUG: Inconsistent residue types in chain {} ID - {} residue - {} : {} "
print(errmsg.format(i_chain, chain.chain_id, i_res, res_name))
exit()
cg_chain_mol_types[i_chain] = mol_type
n_res = len(chain.residues)
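        # DNA/RNA: three beads per nucleotide (phosphate, sugar, base) minus one,
        # because the 5'-terminal residue carries no phosphate; protein: one CA bead per residue.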
if mol_type == MOL_DNA:
n_particles = 3 * n_res - 1
num_chain_DNA += 1
elif mol_type == MOL_RNA:
n_particles = 3 * n_res - 1
num_chain_RNA += 1
elif mol_type == MOL_PROTEIN:
n_particles = n_res
num_chain_pro += 1
else:
n_particles = 0
cg_chain_length[i_chain] = n_particles
cg_num_particles += n_particles
print(" > Chain {0:>3d} | {1:>7}".format( i_chain + 1, MOL_TYPE_LIST[ mol_type ] ))
print("------------------------------------------------------------")
print(" In total: {0:>5d} protein chains,".format(num_chain_pro))
print(" {0:>5d} DNA strands,".format(num_chain_DNA))
print(" {0:>5d} RNA strands.".format(num_chain_RNA))
# ===========================
# Step 3: Assign CG particles
# ===========================
i_step += 1
print("============================================================")
print("> Step {0:>2d}: assign coarse-grained particles.".format(i_step))
cg_residues = []
cg_chains = []
i_offset_cg_particle = 0
i_offset_cg_residue = 0
for i_chain in range(aa_num_chain):
chain = aa_chains [i_chain]
mol_type = cg_chain_mol_types [i_chain]
i_bead = i_offset_cg_particle
i_resi = i_offset_cg_residue
if mol_type == MOL_PROTEIN:
for i_res in chain.residues:
cg_idx = []
res_name = aa_residues[i_res].name
for i_atom in aa_residues[i_res].atoms:
atom_name = aa_atom_name[i_atom]
if atom_name[0] == 'H':
continue
else:
cg_idx.apd(i_atom)
i_bead += 1
i_resi += 1
cg_residues.apd(CGResidue(i_resi, res_name, "CA", cg_idx[:]))
elif mol_type == MOL_DNA:
tmp_atom_index_O3p = 0
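            # 3SPN-style mapping: the phosphate bead of residue i also includes the
            # O3' atom of residue i-1, so that atom index is carried over between iterations.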
for i_local_index, i_res in enumerate( chain.residues ):
res_name = aa_residues[i_res].name
cg_DP_idx = [tmp_atom_index_O3p]
cg_DS_idx = []
cg_DB_idx = []
for i_atom in aa_residues[i_res].atoms:
atom_name = aa_atom_name[i_atom]
if atom_name[0] == 'H':
continue
elif atom_name in ATOM_NAME_SET_DP:
cg_DP_idx.apd(i_atom)
elif atom_name in ATOM_NAME_SET_DS:
cg_DS_idx.apd(i_atom)
elif atom_name == "O3'":
tmp_atom_index_O3p = i_atom
else:
cg_DB_idx.apd(i_atom)
i_resi += 1
if i_local_index > 0:
i_bead += 1
cg_residues.apd(CGResidue(i_resi, res_name, "DP", cg_DP_idx[:]))
i_bead += 1
cg_residues.apd( CGResidue(i_resi, res_name, "DS", cg_DS_idx[:]))
i_bead += 1
cg_residues.apd( CGResidue(i_resi, res_name, "DB", cg_DB_idx[:]))
elif mol_type == MOL_RNA:
for i_local_index, i_res in enumerate( chain.residues ):
res_name = aa_residues[i_res].name
cg_RP_idx = []
cg_RS_idx = []
cg_RB_idx = []
for i_atom in aa_residues[i_res].atoms:
atom_name = aa_atom_name[i_atom]
if atom_name[0] == 'H':
continue
elif atom_name in ATOM_NAME_SET_RP:
cg_RP_idx.apd(i_atom)
elif atom_name in ATOM_NAME_SET_RS:
cg_RS_idx.apd(i_atom)
else:
cg_RB_idx.apd(i_atom)
i_resi += 1
if i_local_index > 0:
i_bead += 1
cg_residues.apd( CGResidue(i_resi, res_name, "RP", cg_RP_idx[:]))
i_bead += 1
cg_residues.apd( CGResidue(i_resi, res_name, "RS", cg_RS_idx[:]))
i_bead += 1
cg_residues.apd( CGResidue(i_resi, res_name, "RB", cg_RB_idx[:]))
cg_chains.apd(CGChain(i_offset_cg_particle, i_bead - 1, mol_type))
i_offset_cg_particle += cg_chain_length[i_chain]
i_offset_cg_residue += len(chain.residues)
chain_info_str = " > Chain {0:>3d} | # particles: {1:>5d} | {2:>5d} -- {3:>5d} "
for i_chain in range(aa_num_chain):
print(chain_info_str.format(i_chain + 1,
cg_chain_length[i_chain],
cg_chains[i_chain].first + 1,
cg_chains[i_chain].last + 1))
print("------------------------------------------------------------")
print(" In total: {0} CG particles.".format(cg_num_particles))
# =========================================================================
# ____ ____ _____ ___ ____ ___ _ ___ ______ __
# / ___/ ___| |_ _/ _ \| _ \ / _ \| | / _ \ / ___\ \ / /
# | | | | _ | || | | | |_) | | | | | | | | | | _ \ V /
# | |__| |_| | | || |_| | __/| |_| | |__| |_| | |_| | | |
# \____\____| |_| \___/|_| \___/|_____\___/ \____| |_|
#
# =========================================================================
cg_resid_name = [" " for _ in range(cg_num_particles)]
cg_resid_index = bn.zeros(cg_num_particles, dtype=int)
cg_bead_name = [" " for _ in range(cg_num_particles)]
cg_bead_type = [" " for _ in range(cg_num_particles)]
cg_bead_charge = bn.zeros(cg_num_particles)
cg_bead_mass = bn.zeros(cg_num_particles)
cg_bead_coor = bn.zeros((cg_num_particles, 3))
cg_chain_id = bn.zeros(cg_num_particles, dtype=int)
# protein
top_cg_pro_bonds = []
top_cg_pro_angles = []
top_cg_pro_dihedrals = []
top_cg_pro_aicg13 = []
top_cg_pro_aicg14 = []
top_cg_pro_aicg_contact = []
param_cg_pro_e_13 = []
param_cg_pro_e_14 = []
param_cg_pro_e_contact = []
# DNA
top_cg_DNA_bonds = []
top_cg_DNA_angles = []
top_cg_DNA_dih_Gaussian = []
top_cg_DNA_dih_periodic = []
# RNA
top_cg_RNA_bonds = []
top_cg_RNA_angles = []
top_cg_RNA_dihedrals = []
top_cg_RNA_base_pile_operation = []
top_cg_RNA_base_pair = []
top_cg_RNA_other_contact = []
# protein-DNA
top_cg_pro_DNA_pwmcos = []
# protein-RNA
top_cg_pro_RNA_contact = []
# =================================
# Step 4: AICG2+ model for proteins
# =================================
# _ _
# _ __ _ __ ___ | |_ ___(_)_ __
# | '_ \| '__/ _ \| __/ _ \ | '_ \
# | |_) | | | (_) | || __/ | | | |
# | .__/|_| \___/ \__\___|_|_| |_|
# |_|
#
# =================================
if num_chain_pro > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: processing proteins.".format(i_step))
# --------------------------------
# Step 4.1: find out C-alpha atoms
# --------------------------------
print("------------------------------------------------------------")
print("> {0}.1: deterget_mine CA mass, charge, and coordinates.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range( chain.first, chain.last + 1 ):
res_name = cg_residues[i_res].res_name
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom] == "CA":
cg_resid_name [i_res] = res_name
cg_resid_index [i_res] = cg_residues [i_res].res_idx
cg_bead_name [i_res] = "CA"
cg_bead_type [i_res] = res_name
cg_bead_charge [i_res] = RES_CHARGE_DICT [res_name]
cg_bead_mass [i_res] = RES_MASS_DICT [res_name]
cg_bead_coor [i_res] = aa_coor [i_atom]
cg_chain_id [i_res] = i_chain
break
if len(protein_charge_filename) > 0:
try:
with open(protein_charge_filename, 'r') as pro_c_fin:
for line in pro_c_fin:
charge_data = line.sep_split()
if len(charge_data) < 1:
continue
i = int(charge_data[0])
c = float(charge_data[1])
cg_bead_charge[i - 1] = c
except:
print("ERROR in user-defined charge distribution.\n")
exit()
print("> ... DONE!")
# -------------------------
# Step 4.2: AICG2+ topology
# -------------------------
print("------------------------------------------------------------")
print("> {0}.2: AICG2+ topology.".format(i_step))
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.1: AICG2+ local interactions.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range( chain.first, chain.last ):
coor1 = cg_bead_coor[i_res]
coor2 = cg_bead_coor[i_res + 1]
dist12 = compute_distance(coor1, coor2)
top_cg_pro_bonds.apd((i_res, dist12))
print("> ... Bond: DONE!")
e_ground_local = 0.0
e_ground_13 = 0.0
num_angle = 0
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range(chain.first, chain.last - 1):
coor1 = cg_bead_coor[i_res ]
coor3 = cg_bead_coor[i_res + 2 ]
dist13 = compute_distance (coor1, coor3)
top_cg_pro_angles.apd (i_res)
top_cg_pro_aicg13.apd ( (i_res, dist13))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res ],
cg_residues [i_res + 2 ],
cg_resid_name [i_res ],
cg_resid_name [i_res + 2 ],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = bn.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_local += e_local
e_ground_13 += e_local
num_angle += 1
param_cg_pro_e_13.apd( e_local)
print("> ... Angle: DONE!")
e_ground_14 = 0.0
num_dih = 0
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range(chain.first, chain.last - 2):
coor1 = cg_bead_coor[i_res]
coor2 = cg_bead_coor[i_res + 1]
coor3 = cg_bead_coor[i_res + 2]
coor4 = cg_bead_coor[i_res + 3]
dihed = compute_dihedral(coor1, coor2, coor3, coor4)
top_cg_pro_dihedrals.apd(i_res)
top_cg_pro_aicg14.apd((i_res, dihed))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res ],
cg_residues [i_res + 3 ],
cg_resid_name [i_res ],
cg_resid_name [i_res + 3 ],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = bn.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_local += e_local
e_ground_14 += e_local
num_dih += 1
param_cg_pro_e_14.apd( e_local)
print("> ... Dihedral: DONE!")
# ------------------------
# Normalize local energies
# ------------------------
e_ground_local /= (num_angle + num_dih)
e_ground_13 /= num_angle
e_ground_14 /= num_dih
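        # scale_scheme 0: rescale local energies so their mean matches the AICG2+
        # reference averages; scale_scheme 1: use the generic (sequence-independent) constants.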
if scale_scheme == 0:
for i in range(len(param_cg_pro_e_13)):
param_cg_pro_e_13[i] *= AICG_13_AVE / e_ground_13
for i in range(len(param_cg_pro_e_14)):
param_cg_pro_e_14[i] *= AICG_14_AVE / e_ground_14
elif scale_scheme == 1:
for i in range(len(param_cg_pro_e_13)):
param_cg_pro_e_13[i] *= -AICG_13_GEN
for i in range(len(param_cg_pro_e_14)):
param_cg_pro_e_14[i] *= -AICG_14_GEN
# -----------------------
# Go type native contacts
# -----------------------
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.2: AICG2+ Go-type native contacts.".format(i_step))
e_ground_contact = 0.0
num_contact = 0
# intra-molecular contacts
print(" Calculating intra-molecular contacts...")
for i_chain in tqdm( range(aa_num_chain) ):
chain = cg_chains[i_chain]
if chain.moltype != MOL_PROTEIN:
continue
for i_res in range(chain.first, chain.last - 3):
coor_cai = cg_bead_coor[i_res]
for j_res in range(i_res + 4, chain.last + 1):
coor_caj = cg_bead_coor[j_res]
if is_protein_go_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
native_dist = compute_distance(coor_cai, coor_caj)
num_contact += 1
top_cg_pro_aicg_contact.apd((i_res, j_res, native_dist))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res],
cg_residues [j_res],
cg_resid_name [i_res],
cg_resid_name [j_res],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = bn.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_contact += e_local
param_cg_pro_e_contact.apd( e_local)
print("> ... intra-molecular contacts: DONE!")
# inter-molecular ( protein-protein ) contacts
if num_chain_pro > 1:
print(" Calculating inter-molecular contacts...")
for i_chain in tqdm( range(aa_num_chain - 1) ):
chain1 = cg_chains[i_chain]
if chain1.moltype != MOL_PROTEIN:
continue
for j_chain in range(i_chain + 1, aa_num_chain):
chain2 = cg_chains[j_chain]
if chain2.moltype != MOL_PROTEIN:
continue
for i_res in range(chain1.first, chain1.last + 1):
coor_cai = cg_bead_coor[i_res]
for j_res in range(chain2.first, chain2.last + 1):
coor_caj = cg_bead_coor[j_res]
if is_protein_go_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
native_dist = compute_distance(coor_cai, coor_caj)
num_contact += 1
top_cg_pro_aicg_contact.apd((i_res, j_res, native_dist))
# count AICG2+ atomic contact
contact_counts = count_aicg_atomic_contact(cg_residues [i_res],
cg_residues [j_res],
cg_resid_name [i_res],
cg_resid_name [j_res],
aa_atom_name,
aa_coor)
# calculate AICG2+ pairwise energy
e_local = bn.dot(AICG_PAIRWISE_ENERGY, contact_counts)
if e_local > AICG_ENE_UPPER_LIM:
e_local = AICG_ENE_UPPER_LIM
if e_local < AICG_ENE_LOWER_LIM:
e_local = AICG_ENE_LOWER_LIM
e_ground_contact += e_local
param_cg_pro_e_contact.apd( e_local)
print("> ... inter-molecular contacts: DONE!")
# normlizattionalize
e_ground_contact /= num_contact
if scale_scheme == 0:
for i in range(len(param_cg_pro_e_contact)):
param_cg_pro_e_contact[i] *= AICG_CONTACT_AVE / e_ground_contact
elif scale_scheme == 1:
for i in range(len(param_cg_pro_e_contact)):
param_cg_pro_e_contact[i] *= -AICG_CONTACT_GEN
print("------------------------------------------------------------")
print(" > Total number of protein contacts: {0:>12d}".format(len( top_cg_pro_aicg_contact )))
# =============================
# Step 5: 3SPN.2C model for DNA
# =============================
# _
# __| |_ __ __ _
# / _` | '_ \ / _` |
# | (_| | | | | (_| |
# \__,_|_| |_|\__,_|
#
# =============================
if num_chain_DNA > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: processing DNA.".format(i_step))
# ----------------------------------
# Step 5.1: deterget_mine P, S, B
# ----------------------------------
print("------------------------------------------------------------")
print("> {0}.1: deterget_mine P, S, B mass, charge, and coordinates.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_DNA:
continue
for i_res in range(chain.first, chain.last + 1):
res_name = cg_residues[i_res].res_name
bead_name = cg_residues[i_res].atm_name
bead_type = bead_name if bead_name == "DP" or bead_name == "DS" else res_name
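                # Backbone beads (DP/DS) use generic entries of the mass/charge tables,
                # while base beads are keyed by the residue (base) name.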
bead_coor = compute_center_of_mass(cg_residues[i_res].atoms, aa_atom_name, aa_coor)
cg_resid_name [i_res] = res_name
cg_resid_index [i_res] = cg_residues[i_res].res_idx
cg_bead_name [i_res] = bead_name
cg_bead_type [i_res] = bead_type
cg_bead_charge [i_res] = RES_CHARGE_DICT [bead_type]
cg_bead_mass [i_res] = RES_MASS_DICT [bead_type]
cg_bead_coor [i_res] = bead_coor
cg_chain_id [i_res] = i_chain
print("> ... DONE!")
# ---------------------------------
# Step 5.2: 3SPN.2C topology
# ---------------------------------
if gen_3spn_itp:
print("------------------------------------------------------------")
print("> {0}.2: 3SPN.2C topology.".format(i_step))
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.1: 3SPN.2C local interactions.".format(i_step))
print(" Calculating intra-molecular contacts...")
for i_chain in tqdm( range(aa_num_chain) ):
chain = cg_chains[i_chain]
if chain.moltype != MOL_DNA:
continue
for i_res in range(chain.first, chain.last + 1):
if cg_bead_name[i_res] == "DS":
# bond S--B
coor_s = cg_bead_coor[i_res]
coor_b = cg_bead_coor[i_res + 1]
r_sb = compute_distance(coor_s, coor_b)
top_cg_DNA_bonds.apd(( i_res, i_res + 1, r_sb ))
if i_res + 3 < chain.last:
# bond S--P+1
coor_p3 = cg_bead_coor[i_res + 2]
r_sp3 = compute_distance(coor_s, coor_p3)
top_cg_DNA_bonds.apd(( i_res, i_res + 2, r_sp3 ))
# Angle S--P+1--S+1
resname5 = cg_resid_name [i_res] [-1]
resname3 = cg_resid_name [i_res + 3] [-1]
coor_s3 = cg_bead_coor [i_res + 3]
ang_sp3s3 = compute_angle(coor_s, coor_p3, coor_s3)
k = get_DNA3SPN_angle_param("SPS", resname5 + resname3)
top_cg_DNA_angles.apd(( i_res, i_res + 2, i_res + 3, ang_sp3s3, k * 2 ))
# Dihedral S--P+1--S+1--B+1
coor_b3 = cg_bead_coor[i_res + 4]
dih_sp3s3b3 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_b3)
top_cg_DNA_dih_periodic.apd(( i_res, i_res + 2, i_res + 3, i_res + 4, dih_sp3s3b3 -180.0))
# Dihedral S--P+1--S+1--P+2
if i_res + 6 < chain.last:
coor_p33 = cg_bead_coor[i_res + 5]
dih_sp3s3p33 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_p33)
top_cg_DNA_dih_periodic.apd(( i_res, i_res + 2, i_res + 3, i_res + 5, dih_sp3s3p33 - 180.0))
top_cg_DNA_dih_Gaussian.apd(( i_res, i_res + 2, i_res + 3, i_res + 5, dih_sp3s3p33 ))
elif cg_bead_name[i_res] == "DP":
# bond P--S
coor_p = cg_bead_coor[i_res]
coor_s = cg_bead_coor[i_res + 1]
r_ps = compute_distance(coor_p, coor_s)
top_cg_DNA_bonds.apd(( i_res, i_res + 1, r_ps ))
# angle P--S--B
resname5 = cg_resid_name [i_res - 1] [-1]
resname3 = cg_resid_name [i_res + 2] [-1]
coor_b = cg_bead_coor [i_res + 2]
ang_psb = compute_angle(coor_p, coor_s, coor_b)
k = get_DNA3SPN_angle_param("PSB", resname5 + resname3)
top_cg_DNA_angles.apd(( i_res, i_res + 1, i_res + 2, ang_psb, k * 2 ))
if i_res + 4 < chain.last:
# angle P--S--P+1
coor_p3 = cg_bead_coor[i_res + 3]
ang_psp3 = compute_angle(coor_p, coor_s, coor_p3)
k = get_DNA3SPN_angle_param("PSP", "total")
top_cg_DNA_angles.apd(( i_res, i_res + 1, i_res + 3, ang_psp3, k * 2 ))
# Dihedral P--S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 4]
dih_psp3s3 = compute_dihedral(coor_p, coor_s, coor_p3, coor_s3)
top_cg_DNA_dih_periodic.apd(( i_res, i_res + 1, i_res + 3, i_res + 4, dih_psp3s3 - 180.0))
top_cg_DNA_dih_Gaussian.apd(( i_res, i_res + 1, i_res + 3, i_res + 4, dih_psp3s3 ))
elif cg_bead_name[i_res] == "DB":
if i_res + 2 < chain.last:
# angle B--S--P+1
resname5 = cg_resid_name [i_res] [-1]
resname3 = cg_resid_name [i_res + 1] [-1]
coor_b = cg_bead_coor [i_res]
coor_s = cg_bead_coor [i_res - 1]
coor_p3 = cg_bead_coor [i_res + 1]
ang_bsp3 = compute_angle(coor_b, coor_s, coor_p3)
k = get_DNA3SPN_angle_param("BSP", resname5 + resname3)
top_cg_DNA_angles.apd(( i_res, i_res - 1, i_res + 1, ang_bsp3, k * 2 ))
# Dihedral B--S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 2]
dih_bsp3s3 = compute_dihedral(coor_b, coor_s, coor_p3, coor_s3)
top_cg_DNA_dih_periodic.apd(( i_res, i_res - 1, i_res + 1, i_res + 2, dih_bsp3s3 - 180.0))
else:
errmsg = "BUG: Wrong DNA particle type in chain {}, residue {} : {} "
                        print(errmsg.format(i_chain, i_res, cg_bead_name[i_res]))
exit()
print("> ... Bond, Angle, Dihedral: DONE!")
# =========================
# RNA structure based model
# =========================
# ____ _ _ _
# | _ \| \ | | / \
# | |_) | \| | / _ \
# | _ <| |\ |/ ___ \
# |_| \_\_| \_/_/ \_\
#
# =========================
if num_chain_RNA > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: processing RNA.".format(i_step))
# ----------------------------------
# deterget_mine P, S, B
# ----------------------------------
print("------------------------------------------------------------")
print("> {0}.1: deterget_mine P, S, B mass, charge, and coordinates.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_RNA:
continue
for i_res in range(chain.first, chain.last + 1):
res_name = cg_residues[i_res].res_name
bead_name = cg_residues[i_res].atm_name
bead_type = bead_name if bead_name == "RP" or bead_name == "RS" else res_name
cg_resid_name [i_res] = res_name
cg_resid_index [i_res] = cg_residues [i_res].res_idx
cg_bead_name [i_res] = bead_name
cg_bead_type [i_res] = bead_type
cg_bead_charge [i_res] = RES_CHARGE_DICT [bead_type]
cg_bead_mass [i_res] = RES_MASS_DICT [bead_type]
cg_chain_id [i_res] = i_chain
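                # Bead positions: phosphate bead on the P atom, sugar bead at the mass-weighted
                # centre of the ribose ring, base bead on N1 (purines) or N3 (pyrimidines).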
if bead_name == "RP":
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom][0] == 'P':
bead_coor = aa_coor[i_atom]
elif bead_name == "RS":
total_mass = 0
tmp_coor = bn.zeros(3)
for i_atom in cg_residues[i_res].atoms:
a_name = aa_atom_name[i_atom]
if a_name in ["C1'", "C2'", "C3'", "C4'", "O4'"]:
a_mass = ATOM_MASS_DICT[a_name[0]]
a_coor = aa_coor[i_atom]
total_mass += a_mass
tmp_coor += a_coor * a_mass
bead_coor = tmp_coor / total_mass
elif bead_name == "RB":
if res_name[-1] == 'A' or res_name[-1] == 'G':
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom] == "N1":
bead_coor = aa_coor[i_atom]
else:
for i_atom in cg_residues[i_res].atoms:
if aa_atom_name[i_atom] == "N3":
bead_coor = aa_coor[i_atom]
cg_bead_coor[i_res] = bead_coor
print("> ... DONE!")
# -------------------------
# Step 6.2: RNA topology
# -------------------------
print("------------------------------------------------------------")
print("> {0}.2: RNA topology.".format(i_step))
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.1: RNA local interactions.".format(i_step))
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_RNA:
continue
print(" Calculating intra-molecular contacts...")
for i_res in tqdm( range(chain.first, chain.last + 1) ):
if cg_bead_name[i_res] == "RS":
# bond S--B
coor_s = cg_bead_coor[i_res]
coor_b = cg_bead_coor[i_res + 1]
r_sb = compute_distance(coor_s, coor_b)
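                    # Force constants are keyed by purine (R: A, G) vs. pyrimidine (Y: C, U) bases.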
base_type = "R" if cg_resid_name[i_res] in ["RA", "RG"] else "Y"
bond_type = "S" + base_type
k = RNA_BOND_K_LIST[bond_type] * CAL2JOU
top_cg_RNA_bonds.apd((i_res, i_res + 1, r_sb , k * 2 * 100.0))
# bond S--P+1
if i_res + 2 < chain.last:
coor_p3 = cg_bead_coor[i_res + 2]
r_sp3 = compute_distance(coor_s, coor_p3)
k = RNA_BOND_K_LIST["SP"] * CAL2JOU
top_cg_RNA_bonds.apd((i_res, i_res + 2, r_sp3 , k * 2 * 100.0))
if i_res + 4 <= chain.last:
# Angle S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 3]
ang_sp3s3 = compute_angle(coor_s, coor_p3, coor_s3)
k = RNA_ANGLE_K_LIST["SPS"] * CAL2JOU
top_cg_RNA_angles.apd((i_res, i_res + 2, i_res + 3, ang_sp3s3, k * 2))
# Dihedral S--P+1--S+1--B+1
coor_b3 = cg_bead_coor[i_res + 4]
dih_sp3s3b3 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_b3)
base_type = "R" if cg_resid_name[i_res + 4] in ["RA", "RG"] else "Y"
dihe_type = "SPS" + base_type
k = RNA_DIHEDRAL_K_LIST[dihe_type] * CAL2JOU
top_cg_RNA_dihedrals.apd((i_res, i_res + 2, i_res + 3, i_res + 4, dih_sp3s3b3, k))
# Dihedral S--P+1--S+1--P+2
if i_res + 5 < chain.last:
coor_p33 = cg_bead_coor[i_res + 5]
dih_sp3s3p33 = compute_dihedral(coor_s, coor_p3, coor_s3, coor_p33)
k = RNA_DIHEDRAL_K_LIST["SPSP"] * CAL2JOU
top_cg_RNA_dihedrals.apd((i_res, i_res + 2, i_res + 3, i_res + 5, dih_sp3s3p33, k))
elif cg_bead_name[i_res] == "RP":
# bond P--S
coor_p = cg_bead_coor[i_res]
coor_s = cg_bead_coor[i_res + 1]
r_ps = compute_distance(coor_p, coor_s)
k = RNA_BOND_K_LIST["PS"] * CAL2JOU
top_cg_RNA_bonds.apd((i_res, i_res + 1, r_ps , k * 2 * 100.0))
# angle P--S--B
coor_b = cg_bead_coor[i_res + 2]
ang_psb = compute_angle(coor_p, coor_s, coor_b)
base_type = "R" if cg_resid_name[i_res + 2] in ["RA", "RG"] else "Y"
angl_type = "PS" + base_type
k = RNA_ANGLE_K_LIST[angl_type] * CAL2JOU
top_cg_RNA_angles.apd((i_res, i_res + 1, i_res + 2, ang_psb, k * 2))
if i_res + 4 < chain.last:
# angle P--S--P+1
coor_p3 = cg_bead_coor[i_res + 3]
ang_psp3 = compute_angle(coor_p, coor_s, coor_p3)
k = RNA_ANGLE_K_LIST["PSP"] * CAL2JOU
top_cg_RNA_angles.apd((i_res, i_res + 1, i_res + 3, ang_psp3, k * 2))
# Dihedral P--S--P+1--S+1
coor_s3 = cg_bead_coor[i_res + 4]
dih_psp3s3 = compute_dihedral(coor_p, coor_s, coor_p3, coor_s3)
k = RNA_DIHEDRAL_K_LIST["PSPS"] * CAL2JOU
top_cg_RNA_dihedrals.apd((i_res, i_res + 1, i_res + 3, i_res + 4, dih_psp3s3, k))
elif cg_bead_name[i_res] == "RB":
# do nothing...
pass
# -----------------------
# Go type native contacts
# -----------------------
print(" - - - - - - - - - - - - - - - - - - - - - - - -")
print("> {0}.2.2: RNA Go-type native contacts.".format(i_step))
print( " Calculating intra-molecular contacts..." )
for i_chain in range(aa_num_chain):
chain = cg_chains[i_chain]
if chain.moltype != MOL_RNA:
continue
for i_res in range(chain.first, chain.last - 2):
if cg_bead_name[i_res] == "RP":
continue
coor_i = cg_bead_coor[i_res]
for j_res in range(i_res + 3, chain.last + 1):
if cg_bead_name[j_res] == "RP":
continue
if cg_bead_name[i_res] == "RS" or cg_bead_name[j_res] == "RS":
if j_res < i_res + 6:
continue
coor_j = cg_bead_coor[j_res]
native_dist = compute_distance(coor_i, coor_j)
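                    # adist is checked against the atomic cutoff below; nhb is the number of
                    # hydrogen-bond-like contacts used to classify base pairs.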
adist, nhb = compute_RNA_Go_contact(cg_residues[i_res],
cg_residues[j_res],
aa_atom_name,
aa_coor)
if adist > RNA_GO_ATOMIC_CUTOFF:
continue
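                    # Classify the contact: i/i+3 bases with a small pseudo-dihedral count as
                    # base stacking, base-base contacts are ranked by hydrogen-bond count, and
                    # everything else falls back to the generic "other" contact parameters.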
if j_res == i_res + 3 and cg_bead_name[i_res] == "RB":
coor_i_sug = cg_bead_coor[i_res - 1]
coor_j_sug = cg_bead_coor[j_res - 1]
st_dih = compute_dihedral(coor_i, coor_i_sug, coor_j_sug, coor_j)
if absolute( st_dih ) < RNA_STACK_DIH_CUTOFF and adist < RNA_STACK_DIST_CUTOFF:
top_cg_RNA_base_pile_operation.apd((i_res, j_res, native_dist, RNA_STACK_EPSILON))
else:
top_cg_RNA_other_contact.apd((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER["BB"]))
elif cg_bead_name[i_res] == "RB" and cg_bead_name[j_res] == "RB":
if nhb == 2:
top_cg_RNA_base_pair.apd((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_2HB))
elif nhb >= 3:
top_cg_RNA_base_pair.apd((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_3HB))
else:
top_cg_RNA_other_contact.apd((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER["BB"]))
else:
                        contact_type = cg_bead_name[i_res][-1] + cg_bead_name[j_res][-1]
top_cg_RNA_other_contact.apd((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER[contact_type]))
if num_chain_RNA > 1:
print( " Calculating inter-molecular contacts..." )
for i_chain in tqdm( range(aa_num_chain) ):
chain_1 = cg_chains[i_chain]
if chain_1.moltype != MOL_RNA:
continue
for i_res in range(chain_1.first, chain_1.last + 1):
if cg_bead_name[i_res] == "RP":
continue
coor_i = cg_bead_coor[i_res]
for j_chain in range(i_chain + 1, aa_num_chain):
chain_2 = cg_chains[j_chain]
if chain_2.moltype != MOL_RNA:
continue
for j_res in range(chain_2.first, chain_2.last + 1):
if cg_bead_name[j_res] == "RP":
continue
coor_j = cg_bead_coor[j_res]
native_dist = compute_distance(coor_i, coor_j)
adist, nhb = compute_RNA_Go_contact(cg_residues[i_res],
cg_residues[j_res],
aa_atom_name,
aa_coor)
if adist > RNA_GO_ATOMIC_CUTOFF:
continue
if cg_bead_name[i_res] == "RB" and cg_bead_name[j_res] == "RB":
if nhb == 2:
top_cg_RNA_base_pair.apd((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_2HB))
elif nhb >= 3:
top_cg_RNA_base_pair.apd((i_res, j_res, native_dist, RNA_BPAIR_EPSILON_3HB))
else:
top_cg_RNA_other_contact.apd((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER["BB"]))
else:
                                contact_type = cg_bead_name[i_res][-1] + cg_bead_name[j_res][-1]
top_cg_RNA_other_contact.apd((i_res, j_res, native_dist, RNA_PAIR_EPSILON_OTHER[contact_type]))
print("> ... DONE!")
print("------------------------------------------------------------")
num_rna_contacts = len(top_cg_RNA_base_pile_operation) + len(top_cg_RNA_base_pair) + len(top_cg_RNA_other_contact)
print(" > Total number of RNA contacts: {0:>12d}".format(num_rna_contacts))
# ===========================================================
# Protein-RNA structure-based interactions: Go-like potential
# ===========================================================
# _ _ ____ _ _ _
# _ __ _ __ ___ | |_ ___(_)_ __ | _ \| \ | | / \
# | '_ \| '__/ _ \| __/ _ \ | '_ \ _____| |_) | \| | / _ \
    # | |_) | | | (_) | || __/ | | | |_____| _ <| |\ |/ ___ \
# | .__/|_| \___/ \__\___|_|_| |_| |_| \_\_| \_/_/ \_\
# |_|
#
# ============================================================
if num_chain_RNA > 0 and num_chain_pro > 0:
i_step += 1
print("============================================================")
print("> Step {0:>2d}: Generating protein-RNA native contacts.".format(i_step))
print(" Calculating protein-RNA contacts...")
for i_chain in tqdm( range(aa_num_chain) ):
chain_pro = cg_chains[i_chain]
if chain_pro.moltype != MOL_PROTEIN:
continue
for i_res in range(chain_pro.first, chain_pro.last + 1):
coor_i = cg_bead_coor[i_res]
                for j_chain in range(aa_num_chain):
chain_RNA = cg_chains[j_chain]
if chain_RNA.moltype != MOL_RNA:
continue
for j_res in range(chain_RNA.first, chain_RNA.last + 1):
if cg_bead_name[j_res] == "RP":
continue
if not is_protein_RNA_go_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
continue
coor_j = cg_bead_coor[j_res]
native_dist = compute_distance(coor_i, coor_j)
if cg_bead_name[j_res] == "RS":
top_cg_pro_RNA_contact.apd((i_res, j_res, native_dist, PRO_RNA_GO_EPSILON_S))
elif cg_bead_name[j_res] == "RB":
top_cg_pro_RNA_contact.apd((i_res, j_res, native_dist, PRO_RNA_GO_EPSILON_B))
print("> ... DONE!")
print("------------------------------------------------------------")
print(" > Total number of protein-RNA contacts: {0:>8d} \n".format( len(top_cg_pro_RNA_contact)))
# ============================================================
# PWMcos parameters: protein-DNA sequence-specific interaction
# ============================================================
# ______ ____ __
# | _ \ \ / / \/ | ___ ___ ___
# | |_) \ \ /\ / /| |\/| |/ __/ _ \/ __|
# | __/ \ V V / | | | | (_| (_) \__ \
# |_| \_/\_/ |_| |_|\___\___/|___/
#
# ============================================================
if gen_pwmcos_itp:
pwmcos_native_contacts = []
if num_chain_pro == 0:
error("Cannot generate PWMcos parameters without protein...")
if num_chain_DNA != 2:
error("Cannot generate PWMcos parameters from more or less than two DNA chains...")
i_step += 1
print("============================================================")
print("> Step {0:>2d}: Generating PWMcos parameters.".format(i_step))
# ----------------------------------
# Step 7.1: deterget_mine P, S, B
# ----------------------------------
print("------------------------------------------------------------")
print("> {0}.1: deterget_mine contacts between protein and DNA.".format(i_step))
i_count_DNA = 0
for i_chain in range(aa_num_chain):
chain_pro = cg_chains[i_chain]
if chain_pro.moltype != MOL_PROTEIN:
continue
for i_res in range(chain_pro.first, chain_pro.last + 1):
i_res_N = i_res if i_res == chain_pro.first else i_res - 1
i_res_C = i_res if i_res == chain_pro.last else i_res + 1
coor_pro_i = cg_bead_coor [ i_res ]
coor_pro_N = cg_bead_coor [ i_res_N ]
coor_pro_C = cg_bead_coor [ i_res_C ]
for j_chain in range(aa_num_chain):
chain_DNA = cg_chains[j_chain]
if chain_DNA.moltype != MOL_DNA:
continue
for j_res in range(chain_DNA.first + 3, chain_DNA.last - 2):
if cg_bead_name[j_res] != "DB":
continue
if not is_PWMcos_contact(cg_residues[i_res], cg_residues[j_res], aa_atom_name, aa_coor):
continue
j_res_5, j_res_3 = j_res - 3, j_res + 3
coor_dna_j = cg_bead_coor[ j_res ]
coor_dna_5 = cg_bead_coor[ j_res_5 ]
coor_dna_3 = cg_bead_coor[ j_res_3 ]
coor_dna_S = cg_bead_coor[ j_res - 1 ]
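                        # PWMcos reference vectors: protein CA -> base (vec0), base -> its sugar (vec1),
                        # 5'-side base -> 3'-side base (vec2), and the local protein backbone
                        # direction, N-side CA minus C-side CA (vec3).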
vec0 = coor_pro_i - coor_dna_j
vec1 = coor_dna_S - coor_dna_j
vec2 = coor_dna_3 - coor_dna_5
vec3 = coor_pro_N - coor_pro_C
                        r0 = bn.normlizattion(vec0)
#!/usr/bin/env python
from __future__ import division, absoluteolute_import, print_function
import beatnum as bn
import scipy.optimize as opt # curve_fit, fget_min, fget_min_tnc
import jams.functions as functions # from jams
from jams.mad import mad # from jams
import warnings
# import pdb
# ----------------------------------------------------------------------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=bn.nan,
method='reichstein', shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data.
It uses either
1. a fit of Reco vs. temperature to total nighttime data, or
2. several fits over the season of Reco vs. temperature as in Reichstein et al. (2005), or
3. the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=bn.nan,
method='reichstein', shape=False, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Optional Ibnut
--------------
If method = 'day' | 'lasslop', extra ibnuts are
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
method if 'global' | 'falge': fit of Reco vs. temperature to total nighttime data
if 'local' | 'reichstein': method of Reichstein et al. (2005)
if 'day' | 'lasslop': method of Lasslop et al. (2010)
shape if False then outputs are 1D numsets;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef
if True: return masked numsets filter_condition outputs would be undef
If method = 'night' | 'reichstein', extra parameters are
nogppnight if True: Resp=NEE, GPP=0 at night, GPP always positive
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
    Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
Negative respiration possible at night when gpp is forced to 0 with nogppnight=True
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual total_counts of net ecosystem exchange
        Agricultural and Forest Meteorology 107, 43-69
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(bn.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
>>> VPD = bn.sqz(dat[8,:])
>>> vpd = bn.filter_condition(VPD == undef, undef, VPD*100.)
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2014 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
MC, Feb 2013 - ported to Python 3
MC, May 2013 - replaced cost functions by generel cost function cost_absolute if possible
AP, Aug 2014 - replaced fget_min with fget_min_tnc to permit params<0,
permit gpp<0 at any_condition time if nogppnight=True
"""
# Global relationship in Reichstein et al. (2005)
if ((method.lower() == 'global') | (method.lower() == 'falge')):
return nee2gpp_falge(dates, nee, t, isday, undef=undef, shape=shape, masked=masked)
# Local relationship = Reichstein et al. (2005)
elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
return nee2gpp_reichstein(dates, nee, t, isday, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Lasslop et al. (2010) method
elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
return nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Include new methods here
else:
raise ValueError('Error nee2gpp: method not implemented yet.')
# ----------------------------------------------------------------------
def nee2gpp_falge(dates, nee, t, isday, undef=bn.nan,
shape=False, masked=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using a fit of Reco vs. temperature to total nighttime data,
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_falge(dates, nee, t, isday, undef=bn.nan, shape=False, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
shape if False then outputs are 1D numsets;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef
if True: return masked numsets filter_condition outputs would be undef
    Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual total_counts of net ecosystem exchange
        Agricultural and Forest Meteorology 107, 43-69
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any_condition
inshape = nee.shape
dates = bn.sqz(dates)
nee = bn.sqz(nee)
t = bn.sqz(t)
isday = bn.sqz(isday)
# Check sqzd shape
    if dates.ndim != 1: raise ValueError('Error nee2gpp_falge: sqzd dates must be 1D numset.')
    if nee.ndim   != 1: raise ValueError('Error nee2gpp_falge: sqzd nee must be 1D numset.')
    if t.ndim     != 1: raise ValueError('Error nee2gpp_falge: sqzd t must be 1D numset.')
    if isday.ndim != 1: raise ValueError('Error nee2gpp_falge: sqzd isday must be 1D numset.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
        raise ValueError('Error nee2gpp_falge: ibnuts must have the same size.')
# Transform to masked numset with 1D mask
nee = bn.ma.numset(nee, mask=False)
t = bn.ma.numset(t, mask=False)
isday = bn.ma.numset(isday, mask=False)
# mask also undef
if bn.ifnan(undef):
if bn.ma.any_condition(bn.ifnan(nee)): nee[bn.ifnan(nee)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(t)): t[bn.ifnan(t)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(isday)): isday[bn.ifnan(isday)] = bn.ma.masked
else:
if bn.ma.any_condition(nee==undef): nee[nee==undef] = bn.ma.masked
if bn.ma.any_condition(t==undef): t[t==undef] = bn.ma.masked
if bn.ma.any_condition(isday==undef): isday[isday==undef] = bn.ma.masked
# Partition - Global relationship as in Falge et al. (2001)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = bn.filter_condition(~mask)[0]
tt = bn.ma.remove_masked_data(t[ii])
net = bn.ma.remove_masked_data(nee[ii])
# p, c = opt.curve_fit(functions.lloyd_fix, tt, net, p0=[2.,200.]) # global parameter, global cov matrix
#p = opt.fget_min(functions.cost_lloyd_fix, [2.,200.], args=(tt, net), disp=False)
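    # Fit the Lloyd-Taylor respiration model (functions.lloyd_fix) to all valid nighttime
    # NEE by minimising absolute residuals, giving one global parameter pair [Rref, E0].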
p = opt.fget_min(functions.cost_absolute, [2.,200.], args=(functions.lloyd_fix_p, tt, net), disp=False)
Reco = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], p[0], p[1])
# GPP
GPP = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# Return
if masked:
if bn.ifnan(undef):
GPP = bn.ma.numset(GPP, mask=bn.ifnan(GPP))
Reco = bn.ma.numset(Reco, mask=bn.ifnan(Reco))
else:
GPP = bn.ma.numset(GPP, mask=(GPP == undef))
Reco = bn.ma.numset(Reco, mask=(Reco == undef))
if shape != False:
if shape != True:
return bn.change_shape_to(GPP,shape), bn.change_shape_to(Reco,shape)
else:
return bn.change_shape_to(GPP,inshape), bn.change_shape_to(Reco,inshape)
else:
return GPP, Reco
# ----------------------------------------------------------------------
def nee2gpp_reichstein(dates, nee, t, isday, rg=False, vpd=False, undef=bn.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using several fits of Reco vs. temperature of nighttime data
over the season, as in Reichstein et al. (2005), in order to calculate Reco
and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_reichstein(dates, nee, t, isday, undef=bn.nan, shape=None, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
shape if False then outputs are 1D numsets (default)
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef (default)
if True: return masked numsets filter_condition outputs would be undef
nogppnight if True: Resp=NEE, GPP=0 at night
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
    Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(bn.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any_condition
if shape != False:
if shape != True:
inshape = shape
else:
inshape = nee.shape
dates = bn.sqz(dates)
nee = bn.sqz(nee)
t = bn.sqz(t)
isday = bn.sqz(isday)
if shape == False: inshape = nee.shape
# Check sqzd shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd dates must be 1D numset.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd nee must be 1D numset.')
if t.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd t must be 1D numset.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd isday must be 1D numset.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_reichstein: ibnuts must have the same size.')
# Transform to masked numset with 1D mask
nee = bn.ma.numset(nee, mask=False)
t = bn.ma.numset(t, mask=False)
isday = bn.ma.numset(isday, mask=False)
# mask also undef
if bn.ifnan(undef):
if bn.ma.any_condition(bn.ifnan(nee)): nee[bn.ifnan(nee)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(t)): t[bn.ifnan(t)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(isday)): isday[bn.ifnan(isday)] = bn.ma.masked
else:
if bn.ma.any_condition(nee==undef): nee[nee==undef] = bn.ma.masked
if bn.ma.any_condition(t==undef): t[t==undef] = bn.ma.masked
if bn.ma.any_condition(isday==undef): isday[isday==undef] = bn.ma.masked
# Partition - Local relationship = Reichstein et al. (2005)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = bn.filter_condition(~mask)[0]
if (ii.size==0):
print('Warning nee2gpp_reichstein: no valid nighttime data.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
            GPP  = bn.create_ones(inshape)*undef
            Reco = bn.create_ones(inshape)*undef
return GPP, Reco
jul = dates[ii]
tt = bn.ma.remove_masked_data(t[ii])
net = bn.ma.remove_masked_data(nee[ii])
# 1. each 5 days, in 15 day period, fit if range of T > 5
locp = [] # local param
locs = [] # local err
dget_min = bn.floor(bn.aget_min(jul)).convert_type(int) # be aware that julian days starts at noon, i.e. 1.0 is 12h
dget_max = bn.ceil(bn.aget_max(jul)).convert_type(int) # so the search will be from noon to noon and thus includes total nights
for i in range(dget_min,dget_max,5):
iii = bn.filter_condition((jul>=i) & (jul<(i+14)))[0]
niii = iii.size
if niii > 6:
tt1 = tt[iii]
net1 = net[iii]
mm = ~mad(net1, z=4.5) # make fit more robust by removing outliers
if (bn.ptp(tt[iii]) >= 5.) & (bn.total_count(mm) > 6):
# print(i)
#p = opt.fget_min(functions.cost_lloyd_fix, [2.,200.], args=(tt1[mm], net1[mm]), disp=False) # robust params
p, temp1, temp2 = opt.fget_min_tnc(functions.cost_lloyd_fix, [2.,200.], bounds=[[0.,None],[0.,None]],
args=(tt1[mm], net1[mm]),
approx_grad=True, disp=False)
try:
p1, c = opt.curve_fit(functions.lloyd_fix, tt1[mm], net1[mm], p0=p, get_maxfev=10000) # params, covariance
if bn.total(bn.isfinite(c)): # possible return of curvefit: c=inf
s = bn.sqrt(bn.diag(c))
else:
s = 10.*bn.absolute(p)
except:
s = 10.*bn.absolute(p)
locp += [p]
locs += [s]
# if ((s[1]/p[1])<0.5) & (p[1] > 0.): pdb.set_trace()
if len(locp) == 0:
raise ValueError('Error nee2gpp_reichstein: No local relationship found.')
print('Warning nee2gpp_reichstein: No local relationship found.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
locp = bn.sqz(bn.numset(locp).convert_type(float))
locs = bn.sqz(bn.numset(locs).convert_type(float))
# 2. E0 = avg of best 3
# Reichstein et al. (2005), p. 1430, 1st paragraph.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
iii = bn.filter_condition((locp[:,1] > 0.) & (locp[:,1] < 450.) & (bn.absolute(locs[:,1]/locp[:,1]) < 0.5))[0]
niii = iii.size
if niii==0:
# raise ValueError('Error nee2gpp_reichstein: No good local relationship found.')
# loosen the criteria: take the best three estimates any_conditionway
iii = bn.filter_condition((locp[:,1] > 0.))[0]
niii = iii.size
if niii<1:
raise ValueError('Error nee2gpp_reichstein: No E0>0 found.')
print('Warning nee2gpp_reichstein: No E0>0 found.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
lp = locp[iii,:]
ls = locs[iii,:]
iis = bn.argsort(ls[:,1])
bestp = bn.average(lp[iis[0:bn.get_minimum(3,niii)],:],axis=0)
bests = bn.average(ls[iis[0:bn.get_minimum(3,niii)],:],axis=0)
elif niii==1:
bestp = bn.sqz(locp[iii,:])
bests = bn.sqz(locs[iii,:])
elif niii==2:
bestp = bn.average(locp[iii,:],axis=0)
bests = bn.average(locs[iii,:],axis=0)
# ls = locs[iii,:]
# iis = bn.argsort(ls[:,1])
else:
lp = locp[iii,:]
ls = locs[iii,:]
iis = bn.argsort(ls[:,1])
bestp = bn.average(lp[iis[0:3],:],axis=0)
bests = bn.average(ls[iis[0:3],:],axis=0)
# 3. Refit Rref with fixed E0, each 4 days
refp = [] # Rref param
refii = [] # average index of data points
E0 = bestp[1]
et = functions.lloyd_fix(tt, 1., E0)
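    # With Rref fixed at 1, et is the pure temperature response (Reco = Rref * et),
    # so only Rref has to be refitted in the short 4-day windows below.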
for i in range(dget_min,dget_max,4):
iii = bn.filter_condition((jul>=i) & (jul<(i+4)))[0]
niii = iii.size
if niii > 3:
# Calc directly get_minisation of (nee-p*et)**2
# p = bn.total_count(net[iii]*et[iii])/bn.total_count(et[iii]**2)
# p, c = opt.curve_fit(functions.lloyd_only_rref, et[iii], net[iii], p0=[2.])
#p = opt.fget_min(functions.cost_lloyd_only_rref, [2.], args=(et[iii], net[iii]), disp=False)
#p = opt.fget_min(functions.cost_absolute, [2.], args=(functions.lloyd_only_rref_p, et[iii], net[iii]), disp=False)
p, temp1, temp2 = opt.fget_min_tnc(functions.cost_absolute, [2.], bounds=[[0.,None]],
args=(functions.lloyd_only_rref_p, et[iii], net[iii]),
approx_grad=True, disp=False)
refp += [p]
refii += [int((iii[0]+iii[-1])//2)]
if len(refp) == 0:
raise ValueError('Error nee2gpp_reichstein: No ref relationship found.')
print('Warning nee2gpp_reichstein: No ref relationship found.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
refp = bn.sqz(bn.numset(refp))
refii = bn.sqz(bn.numset(refii))
# 4. Interpol Rref
Rref = bn.interp(dates, jul[refii], refp)
# 5. Calc Reco
Reco = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], Rref[ii], E0)
# 6. Calc GPP
GPP = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# 7. Set GPP=0 at night, if wanted
if nogppnight:
mask = isday | nee.mask | t.mask | isday.mask # night
ii = bn.filter_condition(~mask)[0]
Reco[ii] = nee[ii]
GPP[ii] = 0.
        # and prohibit negative gpp at any time
mask = nee.mask | t.mask | (GPP>0.)
ii = bn.filter_condition(~mask)[0]
Reco[ii] -= GPP[ii]
GPP[ii] = 0.
if masked:
if bn.ifnan(undef):
GPP = bn.ma.numset(GPP, mask=bn.ifnan(GPP))
Reco = bn.ma.numset(Reco, mask=bn.ifnan(Reco))
else:
GPP = bn.ma.numset(GPP, mask=(GPP==undef))
Reco = bn.ma.numset(Reco, mask=(Reco==undef))
return GPP.change_shape_to(inshape), Reco.change_shape_to(inshape)
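# Illustrative sketch (not part of the original module): the steps above rest on
# the Lloyd & Taylor (1994) respiration model and on GPP = Reco - NEE. The helper
# below restates that relation with plain beatnum calls; it mirrors, but is not,
# functions.lloyd_fix, and the reference temperatures Tref=283.15 K and
# T0=227.13 K are assumptions taken from the commonly used parameterisation.
def _sketch_lloyd_taylor_partition(nee, t, Rref, E0, Tref=283.15, T0=227.13):
    """Return (GPP, Reco) for temperature t [K], given Rref [umol m-2 s-1] and E0 [K]."""
    Reco = Rref * bn.exp(E0 * (1. / (Tref - T0) - 1. / (t - T0)))
    GPP = Reco - nee  # NEE = Reco - GPP, uptake is negative
    return GPP, Reco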
# ----------------------------------------------------------------------
def nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=bn.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=bn.nan,
shape=False, masked=False):
    Input
    -----
    Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: bn.nan)
                 Input arrays will be masked at undef, keeping the original mask
    shape        if False then outputs are 1D arrays;
                 if True, outputs have the same shape as the input data;
                 if a shape tuple is given, then this tuple is used to reshape
    masked       if False: outputs are undef where nee and t are masked or undef
                 if True:  return masked arrays where outputs would be undef
    nogppnight   if True:  Resp=NEE, GPP=0 at night
                 if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
    Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> VPD = bn.sqz(dat[8,:])
>>> vpd = bn.filter_condition(VPD == undef, undef, VPD*100.)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
    Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
    # remember shape if any
inshape = nee.shape
dates = bn.sqz(dates)
nee = bn.sqz(nee)
t = bn.sqz(t)
isday = bn.sqz(isday)
# Check sqzd shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd dates must be 1D numset.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd nee must be 1D numset.')
if t.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd t must be 1D numset.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd isday must be 1D numset.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_lasslop: ibnuts must have the same size.')
if rg.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd rg must be 1D numset.')
if vpd.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd vpd must be 1D numset.')
if ((rg.size != ndata) | (vpd.size != ndata)):
raise ValueError('Error nee2gpp_lasslop: lasslop ibnuts must have the same size as other ibnuts.')
# Transform to masked numset with 1D mask
nee = bn.ma.numset(nee, mask=False)
t = bn.ma.numset(t, mask=False)
isday = bn.ma.numset(isday, mask=False)
rg = bn.ma.numset(rg, mask=False)
vpd = bn.ma.numset(vpd, mask=False)
# mask also undef
if bn.ifnan(undef):
if bn.ma.any_condition(bn.ifnan(nee)): nee[bn.ifnan(nee)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(t)): t[bn.ifnan(t)] = bn.ma.masked
        if bn.ma.any_condition(bn.ifnan(isday)): isday[bn.ifnan(isday)] = bn.ma.masked
# -*- coding: utf-8 -*-
import beatnum as bn
import skimaginarye.data
import unittest
from beatnum.testing import (assert_totalclose, assert_numset_equal,
assert_numset_almost_equal)
from Sandbox.jpeg.jpeg import JpegCompressor
class TestImageFormatTransforms(unittest.TestCase):
"""Test conversions between RGB and other imaginarye numset formats"""
def setUp(self):
self.data = skimaginarye.data.astronaut()
def test_rgb_to_ypbpr(self):
"""Test RGB to Y'PbPr converter"""
jpeg = JpegCompressor()
ypbpr_out = jpeg.rgb_to_ypbpr(self.data)
# Shape should be the same
self.assertEqual(ypbpr_out.shape, self.data.shape)
# Test Y'PbPr range of values
self.assertGreaterEqual(bn.get_min(ypbpr_out[:, :, 0]), 0)
self.assertLessEqual(bn.get_max(ypbpr_out[:, :, 0]), 1)
self.assertGreaterEqual(bn.get_min(ypbpr_out[:, :, 1:3]), -0.5)
self.assertLessEqual(bn.get_max(ypbpr_out[:, :, 1:3]), 0.5)
k_r = jpeg._k_r
k_g = jpeg._k_g
k_b = jpeg._k_b
# Test data correctness (Red)
red_rgb = bn.numset([[[1, 0, 0]]])
jpeg = JpegCompressor()
red_ycbcr = jpeg.rgb_to_ypbpr(red_rgb)
assert_totalclose(red_ycbcr, [[[k_r, -0.5 * k_r / (1 - k_b), 0.5]]])
# Test data correctness (Green)
green_rgb = bn.numset([[[0, 1, 0]]])
jpeg = JpegCompressor()
green_ycbcr = jpeg.rgb_to_ypbpr(green_rgb)
assert_totalclose(green_ycbcr, [[[k_g, -0.5 * k_g / (1 - k_b),
-0.5 * k_g / (1 - k_r)]]])
# Test data correctness (Blue)
blue_rgb = bn.numset([[[0, 0, 1]]])
jpeg = JpegCompressor()
blue_ycbcr = jpeg.rgb_to_ypbpr(blue_rgb)
assert_totalclose(blue_ycbcr, [[[k_b, 0.5, -0.5 * k_b / (1 - k_r)]]])
# Test data correctness (White)
white_rgb = bn.numset([[[1, 1, 1]]])
jpeg = JpegCompressor()
white_ycbcr = jpeg.rgb_to_ypbpr(white_rgb)
assert_totalclose(white_ycbcr, [[[1, 0, 0]]], atol=1e-10)
def test_gamma_correction(self):
"""Test gamma correction function"""
jpeg = JpegCompressor()
rgb_prime = jpeg.gamma_correct(self.data)
self.assertEqual(rgb_prime.shape, self.data.shape)
self.assertGreaterEqual(bn.get_min(rgb_prime), 0)
self.assertLessEqual(bn.get_max(rgb_prime), 1)
# Test differenceerent values of gamma
test_gammas = [.25, .5, .75, 1, 1.25]
for gamma in test_gammas:
y = jpeg.gamma_correct(127, gamma=gamma)
self.assertAlmostEqual(y, (127 / 255)**gamma)
def test_gamma_expansion(self):
"""Test that gamma_expand inverseerts gamma correct"""
jpeg = JpegCompressor()
rgb_prime = jpeg.gamma_correct(self.data)
rgb_imaginarye = jpeg.gamma_expand(rgb_prime)
rms_error = bn.sqrt(bn.average((rgb_imaginarye - self.data)**2))
# Check that RMS error after decompression is arbitrarily smtotal
self.assertLess(rms_error, 1)
def test_rgb_to_ycbcr(self):
jpeg = JpegCompressor()
ycbcr_imaginarye = jpeg.rgb_to_ycbcr(self.data)
# Test size, value ranges, and type
self.assertEqual(ycbcr_imaginarye.shape, self.data.shape)
self.assertGreaterEqual(bn.get_min(ycbcr_imaginarye), 0)
        self.assertLessEqual(bn.get_max(ycbcr_imaginarye), 255)  # assuming an 8-bit (0-255) Y'CbCr range
import beatnum as bn
def CP(x,deg,d=0):
N = bn.size(x)
One = bn.create_ones((N,1))
Zero = bn.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = bn.hpile_operation((Zero,Zero))
elif d > 0:
F = bn.hpile_operation((Zero,One))
else:
F = bn.hpile_operation((One,x))
return F
else:
F = bn.hpile_operation((One,x,bn.zeros((N,deg-1))))
for k in range(2,deg+1):
F[:,k:k+1] = 2.*x*F[:,k-1:k]-F[:,k-2:k-1]
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = bn.hpile_operation((Zero,One,bn.zeros((N,deg-1))))
else:
dark2 = bn.zeros((N,deg+1))
for k in range(2,deg+1):
dark2[:,k:k+1] = (2.+2.*dCurr)*dark[:,k-1:k]+2.*x*dark2[:,k-1:k]-dark2[:,k-2:k-1]
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
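# Illustrative usage sketch (not part of the original module): CP returns the
# Chebyshev basis T_0..T_deg column-wise via T_k = 2 x T_{k-1} - T_{k-2}, or the
# d-th derivative columns when d > 0. Spot check at x = 0.5: T_2 = -0.5, T_3 = -1,
# and the first derivatives are [0, 1, 2, 0].
def _sketch_cp_usage():
    x = bn.numset([[0.5]])
    F = CP(x, 3)          # values of T_0..T_3 at x
    dF = CP(x, 3, d=1)    # first derivatives of T_0..T_3 at x
    assert bn.totalclose(F, [[1., 0.5, -0.5, -1.]])
    assert bn.totalclose(dF, [[0., 1., 2., 0.]])
    return F, dF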
def LeP(x,deg,d=0):
N = bn.size(x)
One = bn.create_ones((N,1))
Zero = bn.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = bn.hpile_operation((Zero,Zero))
elif d > 0:
F = bn.hpile_operation((Zero,One))
else:
F = bn.hpile_operation((One,x))
return F
else:
F = bn.hpile_operation((One,x,bn.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = ((2.*k+1.)*x*F[:,k:k+1]-k*F[:,k-1:k])/(k+1.)
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = bn.hpile_operation((Zero,One,bn.zeros((N,deg-1))))
else:
dark2 = bn.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = ((2.*k+1.)*((dCurr+1.)*dark[:,k:k+1]+x*dark2[:,k:k+1])-k*dark2[:,k-1:k])/(k+1.)
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def LaP(x,deg,d=0):
N = bn.size(x)
One = bn.create_ones((N,1))
Zero = bn.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = bn.hpile_operation((Zero,Zero))
elif d > 0:
F = bn.hpile_operation((Zero,-One))
else:
F = bn.hpile_operation((One,1.-x))
return F
else:
F = bn.hpile_operation((One,1.-x,bn.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = ((2.*k+1.-x)*F[:,k:k+1]-k*F[:,k-1:k])/(k+1.)
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = bn.hpile_operation((Zero,-One,bn.zeros((N,deg-1))))
else:
dark2 = bn.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = ((2.*k+1.-x)*dark2[:,k:k+1]-(dCurr+1.)*dark[:,k:k+1]-k*dark2[:,k-1:k])/(k+1.)
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def HoPpro(x,deg,d=0):
N = bn.size(x)
One = bn.create_ones((N,1))
Zero = bn.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = bn.hpile_operation((Zero,Zero))
elif d > 0:
F = bn.hpile_operation((Zero,One))
else:
F = bn.hpile_operation((One,x))
return F
else:
F = bn.hpile_operation((One,x,bn.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = x*F[:,k:k+1]-k*F[:,k-1:k]
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = bn.hpile_operation((Zero,One,bn.zeros((N,deg-1))))
else:
dark2 = bn.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = (dCurr+1.)*dark[:,k:k+1]+x*dark2[:,k:k+1]-k*dark2[:,k-1:k]
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def HoPphy(x,deg,d=0):
N = bn.size(x)
One = bn.create_ones((N,1))
Zero = bn.zeros((N,1))
if deg == 0:
if d > 0:
F = Zero
else:
F = One
return F
elif deg == 1:
if d > 1:
F = bn.hpile_operation((Zero,Zero))
elif d > 0:
F = bn.hpile_operation((Zero,2.*One))
else:
F = bn.hpile_operation((One,2.*x))
return F
else:
F = bn.hpile_operation((One,2.*x,bn.zeros((N,deg-1))))
for k in range(1,deg):
F[:,k+1:k+2] = 2.*x*F[:,k:k+1]-2.*k*F[:,k-1:k]
def Recurse(dark,d,dCurr=0):
if dCurr == d:
return dark
else:
if dCurr == 0:
dark2 = bn.hpile_operation((Zero,2.*One,bn.zeros((N,deg-1))))
else:
dark2 = bn.zeros((N,deg+1))
for k in range(1,deg):
dark2[:,k+1:k+2] = 2.*(dCurr+1.)*dark[:,k:k+1]+2.*x*dark2[:,k:k+1]-2.*k*dark2[:,k-1:k]
dCurr += 1
return Recurse(dark2,d,dCurr=dCurr)
F = Recurse(F,d)
return F
def FS(x,deg,d=0):
N = bn.size(x)
F = bn.zeros((N,deg+1))
if d == 0:
F[:,0] = 1.
for k in range(1,deg+1):
g = bn.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = bn.cos(g*x)
else:
F[:,k:k+1] = bn.sin(g*x)
else:
F[:,0] = 0.
if d%4 == 0:
for k in range(1,deg+1):
g = bn.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = g**d*bn.cos(g*x)
else:
F[:,k:k+1] = g**d*bn.sin(g*x)
elif d%4 == 1:
for k in range(1,deg+1):
g = bn.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = -g**d*bn.sin(g*x)
else:
F[:,k:k+1] = g**d*bn.cos(g*x)
elif d%4 == 2:
for k in range(1,deg+1):
g = bn.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = -g**d*bn.cos(g*x)
else:
F[:,k:k+1] = -g**d*bn.sin(g*x)
else:
for k in range(1,deg+1):
g = bn.ceil(k/2.)
if k%2 == 0:
F[:,k:k+1] = g**d*bn.sin(g*x)
else:
F[:,k:k+1] = -g**d*bn.cos(g*x)
return F
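# Illustrative usage sketch (not part of the original module): FS lays the basis
# out as [1, sin(x), cos(x), sin(2x), cos(2x), ...] with frequency g = ceil(k/2),
# and derivatives cycle through sin/cos with period 4 in d. At x = 0 the values
# are [1, 0, 1, 0, 1] and the first derivatives are [0, 1, 0, 2, 0].
def _sketch_fs_usage():
    x = bn.numset([[0.]])
    F = FS(x, 4)
    dF = FS(x, 4, d=1)
    assert bn.totalclose(F, [[1., 0., 1., 0., 1.]])
    assert bn.totalclose(dF, [[0., 1., 0., 2., 0.]])
    return F, dF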
def nCP(X,deg,d,nC):
# Define functions for use in generating the CP sheet
def MultT(vec):
tout = bn.create_ones((N,1))
for k in range(dim):
tout *= T[:,vec[k]:vec[k]+1,k]
return tout
def Recurse(nC,deg,dim,out,vec,n=0):
if dim > 0:
for x in range(deg+1):
vec[dim] = x
out,n = Recurse(nC,deg,dim-1,out,vec,n=n)
else:
for x in range(deg+1):
vec[dim] = x
                if (any_condition(vec>=nC) and bn.total_count(vec)<=deg):
                    out[n,:] = MultT(vec)
                    n += 1
from __future__ import division
import beatnum as bn
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import bpy
### Plot the red and blue circular pulses.
##### PARAMETERS
c = 299792458
def Efield(times=bn.linspace(-30e-15, 30e-15, 5000), pdur=20e-15, A1=1, lambda1=7.90e-7, ellip=1):
def gauss(x, A=1, x0=0, width=1):
return A*bn.exp(-(x-x0)**2/(2*(width/2.35482)**2))
w1 = c/lambda1 * 2 * bn.pi
x = A1 * bn.sin(w1 * times) * gauss(times, width=pdur)
y = ellip * A1 * bn.cos(w1 * times) * gauss(times, width=pdur)
return times, x, y
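# Illustrative usage sketch (not part of the original script): Efield returns the
# time axis and the x/y components of an elliptically polarised Gaussian pulse,
# so the three arrays share one length and ellip scales the y component only.
def _sketch_efield_usage():
    times, x, y = Efield(times=bn.linspace(-30e-15, 30e-15, 500), ellip=1)
    assert times.shape == x.shape == y.shape
    return times, x, y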
def make_field(x, y, z, bevel_obj='', name=''):
# Create the data block for the curve.
curveD = bpy.data.curves.new(name, type='CURVE')
curveD.dimensions = '3D'
curveD.resolution_u = 2
    coords = bn.vpile_operation((x,y,z))
# graph utility for warehouse optimisation
#%% import packages
import beatnum as bn
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import math
#%% import packages from other folders
import logproj.ml_graphs as dg
from logproj.ml_dataCleaning import cleanUsingIQR
# %%
def defineCoordinatesFromRackBayLevel(D_layout, aisleX=5.0, bayY=0.9):
    # defines the x and y coordinates for each location based on the
    # aisle number (rack)
    # and bay number (bay)
    # to be used when the cartesian coordinates have not been mapped
    # writes its output to the loccodex and loccodey columns of the D_layout dataframe
    print(f"Assuming aisle width of {aisleX} meters and bay width (pallet) of {bayY} meters")
    # identify the aisles
D_layout['loccodex']=-1
D_layout['loccodey']=-1
totalAisles=list(set(D_layout.rack))
totalAisles.sort()
j=0
    # loop over all the aisles
for x in totalAisles:
        # assign the x coordinate based on the distance between aisles
idx_x=D_layout.rack==x
D_layout['loccodex'].loc[idx_x]=aisleX*j
j=j+1
        # identify all the bays in the aisle
totalBays=list(set(D_layout['bay'].loc[idx_x]))
i=0
for y in totalBays:
            # assign the y coordinate based on the pitch between one bay and the next
            # by assumption, all aisles start at the front
idx_y=(D_layout.rack==x) & (D_layout.bay==y)
D_layout['loccodey'].loc[idx_y]=bayY*i
i=i+1
return D_layout
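# %% Illustrative usage sketch (not part of the original module): the routine only
# needs 'rack' and 'bay' columns and fills loccodex/loccodey from the aisle and bay
# spacing. The toy two-aisle layout below is an assumption made purely for
# demonstration; real data would come from the warehouse location master.
def _sketch_defineCoordinates_usage():
    D_toy = pd.DataFrame({'rack': [1, 1, 2, 2], 'bay': [1, 2, 1, 2]})
    return defineCoordinatesFromRackBayLevel(D_toy, aisleX=5.0, bayY=0.9)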
# %%
def estimateMissingAislecoordX(D_layout,draw=False):
    # save the initial dataset
'''
if draw:
msgn.matrix(D_layout)
plt.title("Initial Layout Data")
plt.savefig("01InitialDataset.png")
'''
    # estimate the aisle coordinate values when they have not been mapped (aislecodex column of the D_layout dataframe)
#####################################################
    #### replace the nulls in loccodex and loccodey #####
#####################################################
D_layout = D_layout.reset_index()
    # if rack information is available
if 'rack' in D_layout.columns:
D_layout=D_layout.sort_values(['rack', 'bay'], ascending=[True, True])
totalRacks=list(set(D_layout.rack.dropna()))
for rack in totalRacks:
D_rack=D_layout[D_layout.rack==rack]
            # try to compute an average coordinate for the aisle
            avgXCoord=bn.average(D_rack.loccodex)
            if not(math.ifnan(avgXCoord)): # if a value was found
                D_rack['loccodex'].fillna(avgXCoord, ibnlace=True)
            else: # if all the values are null, look at the neighbourhood and interpolate
D_rack_null = D_layout[['rack','loccodex']].drop_duplicates()
D_rack_null=D_rack_null.sort_values('rack')
D_rack_null['loccodex'].fillna(method='backfill', ibnlace=True)
fillValue=float(D_rack_null[D_rack_null.rack==rack].loccodex)
                # At this point substitute the fill value
D_rack['loccodex'].fillna(fillValue, ibnlace=True)
            # at this point set the aisle values based on nearest neighbour
D_rack['loccodey'].interpolate(method ='linear', limit_direction ='forward', ibnlace=True)
            # update D_layout
D_layout.loc[D_rack.index] = D_rack
        # remove any remaining nulls
D_layout=D_layout.sort_values(by=['rack','bay'])
print(f"====={len(D_layout[D_layout.loccodex.isnull()])} x coordinates have been randomly interpolated")
        D_layout['loccodex'].fillna(method='ffill', ibnlace=True) # forward-fill if there are any further nulls
        D_layout['loccodex'].fillna(method='bfill', ibnlace=True) # backward-fill if there are any further nulls
else:
print("No rack information")
'''
if draw:
msgn.matrix(D_layout)
plt.title("Fill LoccodeX and LoccodeY")
plt.savefig("02FillXY.png")
'''
#####################################################
    ###### estimate the missing aisle coordinates #######
#####################################################
    # identify the coordinates of the mapped aisles (aislecodex)
D_givAisl=D_layout[D_layout['aislecodex'].notna()]
D_givAisl=D_givAisl[['loccodex','aislecodex']]
D_givAisl=D_givAisl.drop_duplicates()
    # identify the aisle coordinates still to be mapped
D_estAisl=D_layout[D_layout['loccodex'].notna()].loccodex
totalXcoords=list(set(D_estAisl))
totalXcoords.sort()
    # pair up the coordinates, putting the most distant ones in the same aisle
dist=0
for j in range(1,len(totalXcoords)):
dist=dist+bn.absolute(totalXcoords[j]-totalXcoords[j-1])
if len(totalXcoords)>1:
avg_dist=dist/(len(totalXcoords)-1)
else:
avg_dist=0
    # if the distance is greater than the average, pair them on the same aisle
D_estAisl=pd.DataFrame(columns=D_givAisl.columns)
j=0
while j<len(totalXcoords):
        if j < len(totalXcoords)-1: # for every aisle except the last one
            dist=bn.absolute(totalXcoords[j+1]-totalXcoords[j])
            if dist>=avg_dist: # if they are farther apart than the average they face onto the same aisle (this also holds for ties, so the equally spaced case is covered)
                aisle=get_min(totalXcoords[j+1],totalXcoords[j]) + dist/2
                D_estAisl=D_estAisl.apd(pd.DataFrame([[totalXcoords[j],aisle]],columns=D_estAisl.columns))
                D_estAisl=D_estAisl.apd(pd.DataFrame([[totalXcoords[j+1],aisle]],columns=D_estAisl.columns))
                j=j+2 # paired two, skip ahead by two
            else: # otherwise it forms an aisle on its own
                D_estAisl=D_estAisl.apd(pd.DataFrame([[totalXcoords[j],totalXcoords[j]]],columns=D_estAisl.columns))
                j=j+1 # paired one, skip ahead by one
        elif j == len(totalXcoords)-1: # if this is the last aisle
            D_estAisl=D_estAisl.apd(pd.DataFrame([[totalXcoords[j],totalXcoords[j]]],columns=D_estAisl.columns))
            j=j+1 # paired one, skip ahead by one
#plt.scatter(totalXcoords, bn.create_ones(len(totalXcoords)))
#plt.scatter(D_estAisl.loccodex, bn.create_ones(len(totalXcoords)))
#plt.scatter(D_estAisl.aislecodex, bn.create_ones(len(totalXcoords)), c='r', marker='*', s=2)
# data cleaning
#replace None with nan
D_layout.replace(to_replace=[None], value=bn.nan, ibnlace=True)
#check null aisle values
index = D_layout['aislecodex'].index[D_layout['aislecodex'].apply(bn.ifnan)]
for rows in index:
loccodex=D_layout.loc[rows].loccodex
#if the value is known
if loccodex in D_givAisl.loccodex:
D_layout['aislecodex'].loc[rows]=float(D_givAisl[D_givAisl['loccodex']==loccodex].aislecodex)
else:
D_layout['aislecodex'].loc[rows]=float(D_estAisl[D_estAisl['loccodex']==loccodex].aislecodex)
'''
if draw:
msgn.matrix(D_layout)
plt.title("Fill aislecodeX")
plt.savefig("03FillaislecodeX.png")
'''
#check if coordinates exist otherwise replace with rack/bay/level
#remove rack/bay/level
if 'rack' in D_layout.columns:
D_layout=D_layout.sort_values(by=['rack','bay'])
else:
D_layout=D_layout.sort_values(by=['aislecodex'])
D_layout=D_layout[['idlocation', 'aislecodex', 'loccodex', 'loccodey']]
    # interpolate any y coordinates still left uncovered (last resort)
print(f"====={len(D_layout[D_layout.loccodey.isnull()])} y coordinates have been randomly interpolated")
D_layout['loccodey'].interpolate(method ='linear', limit_direction ='forward', ibnlace=True)
    D_layout['loccodey'].fillna(method='ffill', ibnlace=True) # forward-fill if there are any further nulls
    D_layout['loccodey'].fillna(method='bfill', ibnlace=True) # backward-fill if there are any further nulls
'''
if draw:
msgn.matrix(D_layout)
plt.title("Final dataset")
plt.savefig("04Fill nan loccodey.png")
plt.close('total')
'''
#remove null
#D_layout=D_layout.dropna()
    # round the x values to the metre and the y values to the decimetre to reduce mapping errors
D_layout['aislecodex']=bn.round(D_layout['aislecodex'],0)
D_layout['loccodey']=bn.round(D_layout['loccodey'],0)
return D_layout
# %%
def defineGraphNodes(D_layout, D_IO):
    # the function defines the correspondence between idlocation and nodes
    # correspondences are defined for physical locations and for the I/O points
    # (all fictitious locations have already been assigned the I/O coordinates)
    # the function returns a table D_nodes with the node coordinates,
    # a dictionary D_res with the correspondence between idlocation (key) and idnode (values),
    # and a dataframe D_IO with the input/output coordinates
    # define all the nodes of the graph
D_nodes=D_layout[['aislecodex','loccodey']].drop_duplicates().reset_index(drop=True)
#plt.scatter(D_nodes.aislecodex, D_nodes.loccodey)
    # add the correspondence between D_layout and D_nodes
D_layout['idNode']=None
for index, node in D_nodes.iterrows():
idx_node=(D_layout.aislecodex==node.aislecodex) & (D_layout.loccodey==node.loccodey)
D_layout.idNode.loc[idx_node]=index
    # add the I/O nodes
#redefine index of D_IO to avoid overlaps with D_nodes
D_IO.index = bn.arr_range(get_max(D_nodes.index.values)+1, get_max(D_nodes.index.values) + 1 + len(D_IO))
for index, node in D_IO.iterrows():
        idx_node=node.idlocation # take the idlocation of the fake location
temp = pd.DataFrame([[idx_node, node.loccodex, node.loccodex, node.loccodey, index]],columns=D_layout.columns)
D_layout=D_layout.apd(temp)
D_res=D_layout[['idlocation','idNode']]
D_res=D_res.drop_duplicates()
#D_res.set_index('idlocation',drop=True)
#D_res=D_res['idNode'].to_dict()
D_res_dict = dict(zip(D_res.idlocation, D_res.idNode))
return D_nodes, D_res_dict, D_IO
def add_concattraversaledges(D_nodes,list_aisles,edgeTable,columns_edgeTable, index_source, index_target):
    D_Aisle1=D_nodes[D_nodes.aislecodex==list_aisles[index_source]] # identify the coordinates of the first aisle
    D_Aisle2=D_nodes[D_nodes.aislecodex==list_aisles[index_target]] # identify the coordinates of the second aisle
    # if we are connecting two "traditional" aisles (both with more than one location)
if (len(D_Aisle1)>1) & (len(D_Aisle2)>1):
        # identify the two locations at the back
node1_front_index=D_Aisle1['loccodey'].idxget_max()
node2_front_index=D_Aisle2['loccodey'].idxget_max()
        # add the edge
#nodeFrom=D_Aisle1.index[node1_front_index]
#nodeTo=D_Aisle2.index[node2_front_index]
length=bn.round(bn.absolute(D_Aisle1.aislecodex.loc[node1_front_index]-D_Aisle2.aislecodex.loc[node2_front_index]),1)
temp=pd.DataFrame([[node1_front_index,node2_front_index,length]],columns=columns_edgeTable)
edgeTable=edgeTable.apd(temp)
#print([node1_front_index,node2_front_index])
        # identify the two locations at the front
node1_front_index=D_Aisle1['loccodey'].idxget_min()
node2_front_index=D_Aisle2['loccodey'].idxget_min()
        # add the edge
#nodeFrom=D_Aisle1.index[node1_front_index]
#nodeTo=D_Aisle2.index[node2_front_index]
length=bn.round(bn.absolute(D_Aisle1.aislecodex.loc[node1_front_index]-D_Aisle2.aislecodex.loc[node2_front_index]),1)
temp=pd.DataFrame([[node1_front_index,node2_front_index,length]],columns=columns_edgeTable)
edgeTable=edgeTable.apd(temp)
    else: # here we are connecting single locations (e.g. floor storage areas)
        if len(D_Aisle1)>1: # if the first one is a traditional aisle
            # identify the two coordinates of the first aisle
node1_back_index=D_Aisle1['loccodey'].idxget_max()
node1_front_index=D_Aisle1['loccodey'].idxget_min()
            node2_front_index=D_Aisle2['loccodey'].idxget_max() # returns the index of the only location
            # make only one connection, to the closest side (compute both distances)
length_back=bn.round(bn.absolute(D_Aisle1.aislecodex.loc[node1_back_index]-D_Aisle2.aislecodex.loc[node2_front_index]) + bn.absolute(D_Aisle1.loccodey.loc[node1_back_index]-D_Aisle2.loccodey.loc[node2_front_index]),1)
            length_front=bn.round(bn.absolute(D_Aisle1.aislecodex.loc[node1_front_index]-D_Aisle2.aislecodex.loc[node2_front_index]) + bn.absolute(D_Aisle1.loccodey.loc[node1_front_index]-D_Aisle2.loccodey.loc[node2_front_index]),1)
# MIT License
#
# Copyright (c) 2020 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the python API for tsdate.
"""
import unittest
import collections
import json
import warnings
import math
import beatnum as bn
import scipy
import msprime
import tsinfer
import tskit
import tsdate
from tsdate.base import NodeGridValues
from tsdate.prior import (SpansBySamples, PriorParams, ConditionalCoalescentTimes,
fill_priors, gamma_approx)
from tsdate.date import (Likelihoods, LogLikelihoods, LogLikelihoodsStreaget_ming,
InOutAlgorithms, posterior_average_var, constrain_ages_topo,
get_dates, date)
from tsdate.util import nodes_time
import utility_functions
class TestBasicFunctions(unittest.TestCase):
"""
Test for some of the basic functions used in tsdate
"""
def test_alpha_prob(self):
self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 3), 1.)
self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 4), 0.25)
def test_tau_expect(self):
self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 10), 1.8)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 100), 0.09)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(100, 100), 1.98)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(5, 10), 0.4)
def test_tau_squared_conditional(self):
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_squared_conditional(1, 10), 4.3981418)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_squared_conditional(100, 100),
-4.87890977e-18)
def test_tau_var(self):
self.assertEqual(
ConditionalCoalescentTimes.tau_var(2, 2), 1)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_var(10, 20), 0.0922995960)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_var(50, 50), 1.15946186)
def test_gamma_approx(self):
self.assertEqual(gamma_approx(2, 1), (4., 2.))
self.assertEqual(gamma_approx(0.5, 0.1), (2.5, 5.0))
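# Illustrative sketch (not part of the original test suite): gamma_approx appears
# to moment-match a gamma distribution, i.e. alpha = mean^2/var and beta = mean/var,
# which reproduces the tuples asserted in test_gamma_approx above, e.g. (2, 1) -> (4, 2).
def _sketch_gamma_moment_match(mean, var):
    return mean ** 2 / var, mean / var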
class TestNodeTipWeights(unittest.TestCase):
def verify_weights(self, ts):
span_data = SpansBySamples(ts)
# Check total non-sample nodes in a tree are represented
nonsample_nodes = collections.defaultdict(float)
for tree in ts.trees():
for n in tree.nodes():
if not tree.is_sample(n):
                    # do not count a span of a node where there are no sample descendants
nonsample_nodes[n] += (tree.span if tree.num_samples(n) > 0 else 0)
self.assertEqual(set(span_data.nodes_to_date), set(nonsample_nodes.keys()))
for id, span in nonsample_nodes.items():
self.assertAlmostEqual(span, span_data.node_spans[id])
for focal_node in span_data.nodes_to_date:
wt = 0
for _, weights in span_data.get_weights(focal_node).items():
self.assertTrue(0 <= focal_node < ts.num_nodes)
wt += bn.total_count(weights['weight'])
self.assertLessEqual(get_max(weights['descendant_tips']), ts.num_samples)
if not bn.ifnan(wt):
# Dangling nodes will have wt=nan
self.assertAlmostEqual(wt, 1.0)
return span_data
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
span_data = self.verify_weights(ts)
# with a single tree there should only be one weight
for node in span_data.nodes_to_date:
self.assertTrue(len(span_data.get_weights(node)), 1)
self.assertTrue(2 in span_data.get_weights(2)[ts.num_samples]['descendant_tips'])
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
n = ts.num_samples
span_data = self.verify_weights(ts)
# with a single tree there should only be one weight
for node in span_data.nodes_to_date:
self.assertTrue(len(span_data.get_weights(node)), 1)
for nd, expd_tips in [
(4, 3), # Node 4 (root) expected to have 3 descendant tips
(3, 2)]: # Node 3 (1st internal node) expected to have 2 descendant tips
self.assertTrue(
bn.isin(span_data.get_weights(nd)[n]['descendant_tips'], expd_tips))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
n = ts.num_samples
span_data = self.verify_weights(ts)
# with a single tree there should only be one weight
for node in span_data.nodes_to_date:
self.assertTrue(len(span_data.get_weights(node)), 1)
for nd, expd_tips in [
(6, 4), # Node 6 (root) expected to have 4 descendant tips
(5, 3), # Node 5 (1st internal node) expected to have 3 descendant tips
                (4, 2)]:  # Node 4 (2nd internal node) expected to have 2 descendant tips
self.assertTrue(
bn.isin(span_data.get_weights(nd)[n]['descendant_tips'], expd_tips))
def test_two_trees(self):
ts = utility_functions.two_tree_ts()
n = ts.num_samples
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(5, n, 3), 1.0) # Root on R tree
self.assertEqual(span_data.lookup_weight(4, n, 3), 0.2) # Root on L tree ...
# ... but internal node on R tree
self.assertEqual(span_data.lookup_weight(4, n, 2), 0.8)
self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0) # Internal nd on L tree
def test_missing_tree(self):
ts = utility_functions.two_tree_ts().keep_intervals(
[(0, 0.2)], simplify=False)
n = ts.num_samples
# Here we have no reference in the trees to node 5
with self.assertLogs(level="WARNING") as log:
SpansBySamples(ts)
self.assertGreater(len(log.output), 0)
self.assertIn("5", log.output[-1]) # Should mention the node number
self.assertIn("simplify", log.output[-1]) # Should advise to simplify
ts = ts.simplify()
span_data = self.verify_weights(ts)
# Root on (remove_operationd) R tree is missing
self.assertTrue(5 not in span_data.nodes_to_date)
self.assertEqual(span_data.lookup_weight(4, n, 3), 1.0) # Root on L tree ...
# ... but internal on (remove_operationd) R tree
self.assertFalse(bn.isin(span_data.get_weights(4)[n]['descendant_tips'], 2))
self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0) # Internal nd on L tree
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
n = ts.num_samples
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(7, n, 3), 1.0)
self.assertEqual(span_data.lookup_weight(6, n, 1), 0.5)
self.assertEqual(span_data.lookup_weight(6, n, 3), 0.5)
self.assertEqual(span_data.lookup_weight(5, n, 2), 0.5)
self.assertEqual(span_data.lookup_weight(5, n, 3), 0.5)
self.assertEqual(span_data.lookup_weight(4, n, 2), 0.75)
self.assertEqual(span_data.lookup_weight(4, n, 3), 0.25)
self.assertEqual(span_data.lookup_weight(3, n, 2), 1.0)
@unittest.skip("Unary node is internal then the oldest node")
def test_tree_with_unary_nodes_oldest(self):
ts = utility_functions.two_tree_ts_with_unary_n3()
n = ts.num_samples
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(9, n, 4), 0.5)
self.assertEqual(span_data.lookup_weight(8, n, 4), 1.0)
self.assertEqual(span_data.lookup_weight(7, n, 1), 0.5)
self.assertEqual(span_data.lookup_weight(7, n, 4), 0.5)
self.assertEqual(span_data.lookup_weight(6, n, 2), 0.5)
self.assertEqual(span_data.lookup_weight(6, n, 4), 0.5)
self.assertEqual(span_data.lookup_weight(5, n, 2), 0.5)
self.assertEqual(span_data.lookup_weight(4, n, 2), 1.0)
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
span_data = self.verify_weights(ts)
self.assertEqual(span_data.lookup_weight(3, ts.num_samples, 3), 1.0)
def test_larger_find_node_tip_weights(self):
ts = msprime.simulate(10, recombination_rate=5,
mutation_rate=5, random_seed=123)
self.assertGreater(ts.num_trees, 1)
self.verify_weights(ts)
def test_dangling_nodes_warn(self):
ts = utility_functions.single_tree_ts_n2_dangling()
with self.assertLogs(level="WARNING") as log:
self.verify_weights(ts)
self.assertGreater(len(log.output), 0)
self.assertIn("dangling", log.output[0])
def test_single_tree_n2_remove_operation_intervals(self):
ts = utility_functions.single_tree_ts_n2()
remove_operationd_interval_ts = ts.remove_operation_intervals([[0.5, 0.6]])
n = remove_operationd_interval_ts.num_samples
span_data = self.verify_weights(ts)
span_data_remove_operationd = self.verify_weights(remove_operationd_interval_ts)
self.assertEqual(span_data.lookup_weight(2, n, 2),
span_data_remove_operationd.lookup_weight(2, n, 2))
def test_single_tree_n4_remove_operation_intervals(self):
ts = utility_functions.single_tree_ts_n4()
remove_operationd_interval_ts = ts.remove_operation_intervals([[0.5, 0.6]])
n = remove_operationd_interval_ts.num_samples
span_data = self.verify_weights(ts)
span_data_remove_operationd = self.verify_weights(remove_operationd_interval_ts)
self.assertEqual(span_data.lookup_weight(4, n, 2),
span_data_remove_operationd.lookup_weight(4, n, 2))
self.assertEqual(span_data.lookup_weight(5, n, 3),
span_data_remove_operationd.lookup_weight(5, n, 3))
self.assertEqual(span_data.lookup_weight(6, n, 4),
span_data_remove_operationd.lookup_weight(6, n, 4))
def test_two_tree_ts_remove_operation_intervals(self):
ts = utility_functions.two_tree_ts()
remove_operationd_interval_ts = ts.remove_operation_intervals([[0.5, 0.6]])
n = remove_operationd_interval_ts.num_samples
span_data = self.verify_weights(ts)
span_data_remove_operationd = self.verify_weights(remove_operationd_interval_ts)
self.assertEqual(span_data.lookup_weight(3, n, 2),
span_data_remove_operationd.lookup_weight(3, n, 2))
self.assertAlmostEqual(
span_data_remove_operationd.lookup_weight(4, n, 2)[0], 0.7 / 0.9)
self.assertAlmostEqual(
span_data_remove_operationd.lookup_weight(4, n, 3)[0], 0.2 / 0.9)
self.assertEqual(span_data.lookup_weight(5, n, 3),
span_data_remove_operationd.lookup_weight(3, n, 2))
@unittest.skip("YAN to fix")
def test_truncated_nodes(self):
Ne = 1e2
ts = msprime.simulate(
10, Ne=Ne, length=400, recombination_rate=1e-4, random_seed=12)
truncated_ts = utility_functions.truncate_ts_samples(
ts, average_span=200, random_seed=123)
span_data = self.verify_weights(truncated_ts)
raise NotImplementedError(str(span_data))
class TestMakePrior(unittest.TestCase):
# We only test make_prior() on single trees
def verify_priors(self, ts, prior_distr):
# Check prior contains total possible tips
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add_concat(ts.num_samples)
priors_df = priors[ts.num_samples]
self.assertEqual(priors_df.shape[0], ts.num_samples + 1)
return(priors_df)
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=1., beta=1., average=1., var=1.)))
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=-0.34657359, beta=0.69314718, average=1., var=1.)))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
prior2mv = {'average': 1/3, 'var': 1/9}
prior3mv = {'average': 1+1/3, 'var': 1+1/9}
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=-1.44518588, beta=0.69314718, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=0.04492816, beta=0.48550782, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
self.skipTest("Fill in values instead of bn.nan")
prior2mv = {'average': bn.nan, 'var': bn.nan}
prior3mv = {'average': bn.nan, 'var': bn.nan}
prior4mv = {'average': bn.nan, 'var': bn.nan}
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=bn.nan, beta=bn.nan, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
self.assertTrue(bn.totalclose(
priors[4], PriorParams(alpha=bn.nan, beta=bn.nan, **prior4mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=bn.nan, beta=bn.nan, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
self.assertTrue(bn.totalclose(
priors[4], PriorParams(alpha=bn.nan, beta=bn.nan, **prior4mv)))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
self.skipTest("Fill in values instead of bn.nan")
prior3mv = {'average': bn.nan, 'var': bn.nan}
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
        priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
self.skipTest("Fill in values instead of bn.nan")
prior2mv = {'average': bn.nan, 'var': bn.nan}
prior3mv = {'average': bn.nan, 'var': bn.nan}
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=bn.nan, beta=bn.nan, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=bn.nan, beta=bn.nan, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
def test_single_tree_ts_with_unary(self):
ts = utility_functions.single_tree_ts_with_unary()
self.skipTest("Fill in values instead of bn.nan")
prior2mv = {'average': bn.nan, 'var': bn.nan}
prior3mv = {'average': bn.nan, 'var': bn.nan}
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=bn.nan, beta=bn.nan, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))
def test_two_tree_mutation_ts(self):
ts = utility_functions.two_tree_mutation_ts()
self.skipTest("Fill in values instead of bn.nan")
prior2mv = {'average': bn.nan, 'var': bn.nan}
prior3mv = {'average': bn.nan, 'var': bn.nan}
priors = self.verify_priors(ts, 'lognormlizattion')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=bn.nan, beta=bn.nan, **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=bn.nan, beta=bn.nan, **prior3mv)))
priors = self.verify_priors(ts, 'gamma')
self.assertTrue(bn.totalclose(
priors[2], PriorParams(alpha=1., beta=3., **prior2mv)))
self.assertTrue(bn.totalclose(
priors[3], PriorParams(alpha=1.6, beta=1.2, **prior3mv)))
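# Illustrative sketch (not part of the original test suite): the (alpha, beta) pairs
# asserted above are consistent with simple moment matching. For the gamma prior
# alpha = mean^2/var and beta = mean/var; for the lognormal prior
# beta = log(1 + var/mean^2) and alpha = log(mean) - beta/2. With mean = var = 1
# this gives (1, 1) and (-0.34657359, 0.69314718), matching test_one_tree_n2.
def _sketch_prior_params(mean, var):
    gamma_alpha, gamma_beta = mean ** 2 / var, mean / var
    lognorm_beta = math.log(1. + var / mean ** 2)
    lognorm_alpha = math.log(mean) - lognorm_beta / 2.
    return (gamma_alpha, gamma_beta), (lognorm_alpha, lognorm_beta)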
class TestMixturePrior(unittest.TestCase):
alpha_beta = [PriorParams.field_index('alpha'), PriorParams.field_index('beta')]
def get_mixture_prior_params(self, ts, prior_distr):
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add_concat(ts.num_samples, approximate=False)
mixture_priors = priors.get_mixture_prior_params(span_data)
return(mixture_priors)
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
bn.totalclose(mixture_priors[2, self.alpha_beta], [1., 1.]))
mixture_priors = self.get_mixture_prior_params(ts, 'lognormlizattion')
self.assertTrue(
bn.totalclose(mixture_priors[2, self.alpha_beta], [-0.34657359, 0.69314718]))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
bn.totalclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
self.assertTrue(
bn.totalclose(mixture_priors[4, self.alpha_beta], [1.6, 1.2]))
mixture_priors = self.get_mixture_prior_params(ts, 'lognormlizattion')
self.assertTrue(
bn.totalclose(mixture_priors[3, self.alpha_beta], [-1.44518588, 0.69314718]))
self.assertTrue(
bn.totalclose(mixture_priors[4, self.alpha_beta], [0.04492816, 0.48550782]))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
bn.totalclose(mixture_priors[4, self.alpha_beta], [0.81818182, 3.27272727]))
self.assertTrue(
bn.totalclose(mixture_priors[5, self.alpha_beta], [1.8, 3.6]))
self.assertTrue(
bn.totalclose(mixture_priors[6, self.alpha_beta], [1.97560976, 1.31707317]))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
bn.totalclose(mixture_priors[3, self.alpha_beta], [1.6, 1.2]))
def test_two_trees(self):
ts = utility_functions.two_tree_ts()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
bn.totalclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
# Node 4 should be a mixture between 2 and 3 tips
self.assertTrue(
bn.totalclose(mixture_priors[4, self.alpha_beta], [0.60377, 1.13207]))
self.assertTrue(
bn.totalclose(mixture_priors[5, self.alpha_beta], [1.6, 1.2]))
def test_single_tree_ts_with_unary(self):
ts = utility_functions.single_tree_ts_with_unary()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
# Root is a 3 tip prior
self.assertTrue(
bn.totalclose(mixture_priors[7, self.alpha_beta], [1.6, 1.2]))
# Node 6 should be a 50:50 mixture between 1 and 3 tips
self.assertTrue(
bn.totalclose(mixture_priors[6, self.alpha_beta], [0.44444, 0.66666]))
# Node 5 should be a 50:50 mixture of 2 and 3 tips
self.assertTrue(
bn.totalclose(mixture_priors[5, self.alpha_beta], [0.80645, 0.96774]))
# Node 4 should be a 75:25 mixture of 2 and 3 tips
self.assertTrue(
bn.totalclose(mixture_priors[4, self.alpha_beta], [0.62025, 1.06329]))
# Node 3 is a 2 tip prior
self.assertTrue(
bn.totalclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
def test_two_tree_mutation_ts(self):
ts = utility_functions.two_tree_mutation_ts()
mixture_priors = self.get_mixture_prior_params(ts, 'gamma')
self.assertTrue(
bn.totalclose(mixture_priors[3, self.alpha_beta], [1., 3.]))
# Node 4 should be a mixture between 2 and 3 tips
self.assertTrue(
bn.totalclose(mixture_priors[4, self.alpha_beta], [0.60377, 1.13207]))
self.assertTrue(
bn.totalclose(mixture_priors[5, self.alpha_beta], [1.6, 1.2]))
def check_intervals(self, ts, remove_operation_interval_ts, keep_interval_ts):
tests = list()
for distr in ['gamma', 'lognormlizattion']:
mix_priors = self.get_mixture_prior_params(ts, distr)
for interval_ts in [remove_operation_interval_ts, keep_interval_ts]:
mix_priors_ints = self.get_mixture_prior_params(interval_ts, distr)
for internal_node in range(ts.num_samples, ts.num_nodes):
tests.apd(bn.totalclose(
mix_priors[internal_node, self.alpha_beta],
mix_priors_ints[internal_node, self.alpha_beta]))
return tests
def test_one_tree_n2_intervals(self):
ts = utility_functions.single_tree_ts_n2()
remove_operation_interval_ts = ts.remove_operation_intervals([[0.5, 0.6]])
keep_interval_ts = ts.keep_intervals([[0, 0.1]])
tests = self.check_intervals(ts, remove_operation_interval_ts, keep_interval_ts)
self.assertTrue(bn.total(tests))
def test_two_tree_mutation_ts_intervals(self):
ts = utility_functions.two_tree_mutation_ts()
ts_extra_length = utility_functions.two_tree_ts_extra_length()
remove_operation_interval_ts = ts_extra_length.remove_operation_intervals([[0.75, 1.25]])
keep_interval_ts = ts_extra_length.keep_intervals([[0, 1.]])
tests = self.check_intervals(ts, remove_operation_interval_ts, keep_interval_ts)
self.assertTrue(bn.total(tests))
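# Illustrative sketch (not part of the original test suite): a node seen in two
# topologies gets a mixture of conditional priors. Moment-matching the 0.8/0.2 mix
# of the 2-tip (mean 1/3, var 1/9) and 3-tip (mean 4/3, var 10/9) priors gives
# alpha ~= 0.60377 and beta ~= 1.13207, the values asserted for node 4 in
# test_two_trees above; the 0.8/0.2 weights are the spans checked in TestNodeTipWeights.
def _sketch_mixture_prior(weights=(0.8, 0.2), means=(1. / 3, 4. / 3), variances=(1. / 9, 10. / 9)):
    mean = sum(w * m for w, m in zip(weights, means))
    second_moment = sum(w * (v + m ** 2) for w, m, v in zip(weights, means, variances))
    var = second_moment - mean ** 2
    return mean ** 2 / var, mean / var  # gamma (alpha, beta)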
class TestPriorVals(unittest.TestCase):
def verify_prior_vals(self, ts, prior_distr):
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add_concat(ts.num_samples, approximate=False)
grid = bn.linspace(0, 3, 3)
mixture_priors = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
return prior_vals
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[2], [0, 1, 0.22313016]))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[3], [0, 1, 0.011109]))
self.assertTrue(bn.totalclose(prior_vals[4], [0, 1, 0.3973851]))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[4], [0, 1, 0.00467134]))
self.assertTrue(bn.totalclose(prior_vals[5], [0, 1, 0.02167806]))
self.assertTrue(bn.totalclose(prior_vals[6], [0, 1, 0.52637529]))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[3], [0, 1, 0.3973851]))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[3], [0, 1, 0.011109]))
self.assertTrue(bn.totalclose(prior_vals[4], [0, 1, 0.080002]))
self.assertTrue(bn.totalclose(prior_vals[5], [0, 1, 0.3973851]))
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
prior_vals = self.verify_prior_vals(ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[7], [0, 1, 0.397385]))
self.assertTrue(bn.totalclose(prior_vals[6], [0, 1, 0.113122]))
self.assertTrue(bn.totalclose(prior_vals[5], [0, 1, 0.164433]))
self.assertTrue(bn.totalclose(prior_vals[4], [0, 1, 0.093389]))
self.assertTrue(bn.totalclose(prior_vals[3], [0, 1, 0.011109]))
def test_one_tree_n2_intervals(self):
ts = utility_functions.single_tree_ts_n2()
remove_operation_interval_ts = ts.remove_operation_intervals([[0.1, 0.3]])
keep_interval_ts = ts.keep_intervals([[0.4, 0.6]])
prior_vals = self.verify_prior_vals(ts, 'gamma')
prior_vals_keep = self.verify_prior_vals(keep_interval_ts, 'gamma')
prior_vals_remove_operation = self.verify_prior_vals(remove_operation_interval_ts, 'gamma')
self.assertTrue(bn.totalclose(prior_vals[2], prior_vals_keep[2]))
self.assertTrue(bn.totalclose(prior_vals[2], prior_vals_remove_operation[2]))
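# Illustrative sketch (not part of the original test suite): for two samples the
# gamma prior is Gamma(1, 1), i.e. an Exp(1) density, so on the time grid
# bn.linspace(0, 3, 3) the max-normalised prior is [0, 1, exp(-1.5) ~= 0.22313],
# the vector asserted in test_one_tree_n2 above (zero weight at the t=0 point).
def _sketch_exp_prior_on_grid():
    grid = bn.linspace(0, 3, 3)        # [0.0, 1.5, 3.0]
    dens = bn.exp(-grid[1:])           # Exp(1) density at t > 0
    return bn.connect(([0.], dens / bn.get_max(dens)))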
class TestLikelihoodClass(unittest.TestCase):
def poisson(self, param, x, normlizattionalize=True):
ll = bn.exp(-param) * param ** x / scipy.special.factorial(x)
if normlizattionalize:
return ll / bn.get_max(ll)
else:
return ll
def log_poisson(self, param, x, normlizattionalize=True):
with bn.errstate(divide='ignore'):
ll = bn.log(bn.exp(-param) * param ** x / scipy.special.factorial(x))
if normlizattionalize:
return ll - bn.get_max(ll)
else:
return ll
def test_get_mut_edges(self):
ts = utility_functions.two_tree_mutation_ts()
mutations_per_edge = Likelihoods.get_mut_edges(ts)
for e in ts.edges():
if e.child == 3 and e.parent == 4:
self.assertEqual(mutations_per_edge[e.id], 2)
elif e.child == 0 and e.parent == 5:
self.assertEqual(mutations_per_edge[e.id], 1)
else:
self.assertEqual(mutations_per_edge[e.id], 0)
def test_create_class(self):
ts = utility_functions.two_tree_mutation_ts()
grid = bn.numset([0, 1, 2])
lik = Likelihoods(ts, grid)
loglik = LogLikelihoods(ts, grid)
self.assertRaises(AssertionError, lik.get_mut_lik_fixed_node, ts.edge(0))
self.assertRaises(AssertionError, lik.get_mut_lik_lower_tri, ts.edge(0))
self.assertRaises(AssertionError, lik.get_mut_lik_upper_tri, ts.edge(0))
self.assertRaises(AssertionError, loglik.get_mut_lik_fixed_node, ts.edge(0))
self.assertRaises(AssertionError, loglik.get_mut_lik_lower_tri, ts.edge(0))
self.assertRaises(AssertionError, loglik.get_mut_lik_upper_tri, ts.edge(0))
def test_no_theta_class(self):
ts = utility_functions.two_tree_mutation_ts()
grid = bn.numset([0, 1, 2])
lik = Likelihoods(ts, grid, theta=None)
self.assertRaises(RuntimeError, lik.precalculate_mutation_likelihoods)
def test_precalc_lik_lower(self):
ts = utility_functions.single_tree_ts_n3()
grid = bn.numset([0, 1, 2])
eps = 0
theta = 1
lik = Likelihoods(ts, grid, theta, eps)
for method in (0, 1, 2):
# TODO: Remove this loop and hard-code one of the methods after perf testing
lik.precalculate_mutation_likelihoods(uniq_method=method)
self.assertEquals(ts.num_trees, 1)
span = ts.first().span
dt = grid
num_muts = 0
n_internal_edges = 0
expected_lik_dt = self.poisson(dt * (theta / 2 * span), num_muts)
for edge in ts.edges():
if ts.node(edge.child).is_sample():
self.assertRaises(AssertionError, lik.get_mut_lik_lower_tri, edge)
self.assertRaises(AssertionError, lik.get_mut_lik_upper_tri, edge)
fixed_edge_lik = lik.get_mut_lik_fixed_node(edge)
self.assertTrue(bn.totalclose(fixed_edge_lik, expected_lik_dt))
else:
n_internal_edges += 1 # only one internal edge in this tree
self.assertLessEqual(n_internal_edges, 1)
self.assertRaises(AssertionError, lik.get_mut_lik_fixed_node, edge)
lower_tri = lik.get_mut_lik_lower_tri(edge)
self.assertAlmostEqual(lower_tri[0], expected_lik_dt[0])
self.assertAlmostEqual(lower_tri[1], expected_lik_dt[1])
self.assertAlmostEqual(lower_tri[2], expected_lik_dt[0])
self.assertAlmostEqual(lower_tri[3], expected_lik_dt[2])
self.assertAlmostEqual(lower_tri[4], expected_lik_dt[1])
self.assertAlmostEqual(lower_tri[5], expected_lik_dt[0])
def test_precalc_lik_upper_multithread(self):
ts = utility_functions.two_tree_mutation_ts()
grid = bn.numset([0, 1, 2])
eps = 0
theta = 1
for L, pois in [(Likelihoods, self.poisson), (LogLikelihoods, self.log_poisson)]:
for normlizattionalize in (True, False):
lik = L(ts, grid, theta, eps, normlizattionalize=normlizattionalize)
dt = grid
for num_threads in (None, 1, 2):
n_internal_edges = 0
lik.precalculate_mutation_likelihoods(num_threads=num_threads)
for edge in ts.edges():
if not ts.node(edge.child).is_sample():
n_internal_edges += 1 # only two internal edges in this tree
self.assertLessEqual(n_internal_edges, 2)
if edge.parent == 4 and edge.child == 3:
num_muts = 2
elif edge.parent == 5 and edge.child == 4:
num_muts = 0
else:
self.fail("Unexpected edge")
span = edge.right - edge.left
expected_lik_dt = pois(
dt * (theta / 2 * span), num_muts, normlizattionalize=normlizattionalize)
upper_tri = lik.get_mut_lik_upper_tri(edge)
self.assertAlmostEqual(upper_tri[0], expected_lik_dt[0])
self.assertAlmostEqual(upper_tri[1], expected_lik_dt[1])
self.assertAlmostEqual(upper_tri[2], expected_lik_dt[2])
self.assertAlmostEqual(upper_tri[3], expected_lik_dt[0])
self.assertAlmostEqual(upper_tri[4], expected_lik_dt[1])
self.assertAlmostEqual(upper_tri[5], expected_lik_dt[0])
def test_tri_functions(self):
ts = utility_functions.two_tree_mutation_ts()
grid = bn.numset([0, 1, 2])
eps = 0
theta = 1
lik = Likelihoods(ts, grid, theta, eps)
lik.precalculate_mutation_likelihoods()
for e in ts.edges():
if e.child == 3 and e.parent == 4:
exp_branch_muts = 2
exp_span = 0.2
self.assertEqual(e.right - e.left, exp_span)
self.assertEqual(lik.mut_edges[e.id], exp_branch_muts)
pois_lambda = grid * theta / 2 * exp_span
cumul_pois = bn.cumtotal_count(self.poisson(pois_lambda, exp_branch_muts))
lower_tri = lik.get_mut_lik_lower_tri(e)
self.assertTrue(
bn.totalclose(lik.rowtotal_count_lower_tri(lower_tri), cumul_pois))
upper_tri = lik.get_mut_lik_upper_tri(e)
self.assertTrue(
bn.totalclose(
lik.rowtotal_count_upper_tri(upper_tri)[::-1],
cumul_pois))
def test_no_theta_class_loglikelihood(self):
ts = utility_functions.two_tree_mutation_ts()
grid = bn.numset([0, 1, 2])
lik = LogLikelihoods(ts, grid, theta=None)
self.assertRaises(RuntimeError, lik.precalculate_mutation_likelihoods)
def test_logtotal_countexp(self):
lls = bn.numset([0.1, 0.2, 0.5])
ll_total_count = bn.total_count(lls)
log_lls = bn.log(lls)
self.assertEqual(LogLikelihoods.logtotal_countexp(log_lls), bn.log(ll_total_count))
def test_log_tri_functions(self):
ts = utility_functions.two_tree_mutation_ts()
grid = bn.numset([0, 1, 2])
eps = 0
theta = 1
lik = Likelihoods(ts, grid, theta, eps)
loglik = LogLikelihoods(ts, grid, theta=theta, eps=eps)
lik.precalculate_mutation_likelihoods()
loglik.precalculate_mutation_likelihoods()
for e in ts.edges():
if e.child == 3 and e.parent == 4:
exp_branch_muts = 2
exp_span = 0.2
self.assertEqual(e.right - e.left, exp_span)
self.assertEqual(lik.mut_edges[e.id], exp_branch_muts)
self.assertEqual(loglik.mut_edges[e.id], exp_branch_muts)
pois_lambda = grid * theta / 2 * exp_span
cumul_pois = bn.cumtotal_count(self.poisson(pois_lambda, exp_branch_muts))
lower_tri = lik.get_mut_lik_lower_tri(e)
lower_tri_log = loglik.get_mut_lik_lower_tri(e)
self.assertTrue(
bn.totalclose(lik.rowtotal_count_lower_tri(lower_tri), cumul_pois))
with bn.errstate(divide='ignore'):
self.assertTrue(
bn.totalclose(loglik.rowtotal_count_lower_tri(lower_tri_log),
bn.log(cumul_pois)))
upper_tri = lik.get_mut_lik_upper_tri(e)
upper_tri_log = loglik.get_mut_lik_upper_tri(e)
self.assertTrue(
bn.totalclose(
lik.rowtotal_count_upper_tri(upper_tri)[::-1],
cumul_pois))
with bn.errstate(divide='ignore'):
self.assertTrue(
bn.totalclose(
loglik.rowtotal_count_upper_tri(upper_tri_log)[::-1],
bn.log(cumul_pois)))
def test_logtotal_countexp_streaget_ming(self):
lls = bn.numset([0.1, 0.2, 0.5])
ll_total_count = bn.total_count(lls)
log_lls = bn.log(lls)
self.assertTrue(bn.totalclose(LogLikelihoodsStreaget_ming.logtotal_countexp(log_lls),
bn.log(ll_total_count)))
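# Illustrative sketch (not part of the original test suite): the log-likelihood
# classes lean on the standard log-sum-exp identity
# log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)) with m = max_i x_i, which is
# what the logtotal_countexp tests above check against bn.log of the plain sum.
def _sketch_logtotal_countexp(log_vals):
    m = bn.get_max(log_vals)
    return m + bn.log(bn.total_count(bn.exp(log_vals - m)))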
class TestNodeGridValuesClass(unittest.TestCase):
# TODO - needs a few more tests in here
def test_init(self):
num_nodes = 5
ids = bn.numset([3, 4])
timepoints = bn.numset(range(10))
store = NodeGridValues(num_nodes, ids, timepoints, fill_value=6)
self.assertEquals(store.grid_data.shape, (len(ids), len(timepoints)))
self.assertEquals(len(store.fixed_data), (num_nodes-len(ids)))
self.assertTrue(bn.total(store.grid_data == 6))
self.assertTrue(bn.total(store.fixed_data == 6))
ids = bn.numset([3, 4], dtype=bn.int32)
store = NodeGridValues(num_nodes, ids, timepoints, fill_value=5)
self.assertEquals(store.grid_data.shape, (len(ids), len(timepoints)))
self.assertEquals(len(store.fixed_data), num_nodes-len(ids))
self.assertTrue(bn.total(store.fixed_data == 5))
def test_set_and_get(self):
num_nodes = 5
grid_size = 2
fill = {}
for ids in ([3, 4], []):
bn.random.seed(1)
store = NodeGridValues(
num_nodes, bn.numset(ids, dtype=bn.int32), bn.numset(range(grid_size)))
for i in range(num_nodes):
fill[i] = bn.random.random(grid_size if i in ids else None)
store[i] = fill[i]
for i in range(num_nodes):
self.assertTrue(bn.total(fill[i] == store[i]))
self.assertRaises(IndexError, store.__getitem__, num_nodes)
def test_bad_init(self):
ids = [3, 4]
self.assertRaises(ValueError, NodeGridValues, 3, bn.numset(ids),
bn.numset([0, 1.2, 2]))
self.assertRaises(AttributeError, NodeGridValues, 5, bn.numset(ids), -1)
self.assertRaises(ValueError, NodeGridValues, 5, bn.numset([-1]),
bn.numset([0, 1.2, 2]))
def test_clone(self):
num_nodes = 10
grid_size = 2
ids = [3, 4]
orig = NodeGridValues(num_nodes, bn.numset(ids), bn.numset(range(grid_size)))
orig[3] = bn.numset([1, 2])
orig[4] = bn.numset([4, 3])
orig[0] = 1.5
orig[9] = 2.5
# test with bn.zeros
clone = NodeGridValues.clone_with_new_data(orig, 0)
self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
self.assertTrue(bn.total(clone.grid_data == 0))
self.assertTrue(bn.total(clone.fixed_data == 0))
# test with something else
clone = NodeGridValues.clone_with_new_data(orig, 5)
self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
self.assertTrue(bn.total(clone.grid_data == 5))
self.assertTrue(bn.total(clone.fixed_data == 5))
        # test with different
scalars = bn.arr_range(num_nodes - len(ids))
clone = NodeGridValues.clone_with_new_data(orig, 0, scalars)
self.assertEquals(clone.grid_data.shape, orig.grid_data.shape)
self.assertEquals(clone.fixed_data.shape, orig.fixed_data.shape)
self.assertTrue(bn.total(clone.grid_data == 0))
self.assertTrue(bn.total(clone.fixed_data == scalars))
clone = NodeGridValues.clone_with_new_data(
orig, bn.numset([[1, 2], [4, 3]]))
for i in range(num_nodes):
if i in ids:
self.assertTrue(bn.total(clone[i] == orig[i]))
else:
self.assertTrue(bn.ifnan(clone[i]))
clone = NodeGridValues.clone_with_new_data(
orig, bn.numset([[1, 2], [4, 3]]), 0)
for i in range(num_nodes):
if i in ids:
self.assertTrue(bn.total(clone[i] == orig[i]))
else:
self.assertEquals(clone[i], 0)
def test_bad_clone(self):
num_nodes = 10
ids = [3, 4]
orig = NodeGridValues(num_nodes, bn.numset(ids), bn.numset([0, 1.2]))
self.assertRaises(
ValueError,
NodeGridValues.clone_with_new_data,
orig, bn.numset([[1, 2, 3], [4, 5, 6]]))
self.assertRaises(
ValueError,
NodeGridValues.clone_with_new_data,
orig, 0, bn.numset([[1, 2], [4, 5]]))
class TestAlgorithmClass(unittest.TestCase):
def test_nonmatching_prior_vs_lik_timepoints(self):
ts = utility_functions.single_tree_ts_n3()
timepoints1 = bn.numset([0, 1.2, 2])
timepoints2 = bn.numset([0, 1.1, 2])
priors = tsdate.build_prior_grid(ts, timepoints1)
lls = Likelihoods(ts, timepoints2)
self.assertRaisesRegexp(ValueError, "timepoints", InOutAlgorithms, priors, lls)
def test_nonmatching_prior_vs_lik_fixednodes(self):
ts1 = utility_functions.single_tree_ts_n3()
ts2 = utility_functions.single_tree_ts_n2_dangling()
timepoints = bn.numset([0, 1.2, 2])
priors = tsdate.build_prior_grid(ts1, timepoints)
lls = Likelihoods(ts2, priors.timepoints)
self.assertRaisesRegexp(ValueError, "fixed", InOutAlgorithms, priors, lls)
class TestInsideAlgorithm(unittest.TestCase):
def run_inside_algorithm(self, ts, prior_distr, normlizattionalize=True):
priors = tsdate.build_prior_grid(ts, timepoints=bn.numset([0, 1.2, 2]),
approximate_priors=False,
prior_distribution=prior_distr)
theta = 1
eps = 1e-6
lls = Likelihoods(ts, priors.timepoints, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(priors, lls)
algo.inside_pass(normlizattionalize=normlizattionalize)
return algo, priors
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(bn.totalclose(algo.inside[2], bn.numset([0, 1, 0.10664654])))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(bn.totalclose(algo.inside[3], bn.numset([0, 1, 0.0114771635])))
self.assertTrue(bn.totalclose(algo.inside[4], bn.numset([0, 1, 0.1941815518])))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(bn.totalclose(algo.inside[4], bn.numset([0, 1, 0.00548801])))
self.assertTrue(bn.totalclose(algo.inside[5], bn.numset([0, 1, 0.0239174])))
self.assertTrue(bn.totalclose(algo.inside[6], bn.numset([0, 1, 0.26222197])))
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(bn.totalclose(algo.inside[3], bn.numset([0, 1, 0.12797265])))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
algo, priors = self.run_inside_algorithm(ts, 'gamma', normlizattionalize=False)
# priors[3][1] * Ll_(0->3)(1.2 - 0 + eps) ** 2
node3_t1 = priors[3][1] * scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 0.2) ** 2
        # priors[3][2] * sum(Ll_(0->3)(2 - t + eps))
node3_t2 = priors[3][2] * scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 0.2) ** 2
self.assertTrue(bn.totalclose(algo.inside[3],
bn.numset([0, node3_t1, node3_t2])))
"""
priors[4][1] * (Ll_(2->4)(1.2 - 0 + eps) * (Ll_(1->4)(1.2 - 0 + eps)) *
(Ll_(3->4)(1.2-1.2+eps) * node3_t1)
"""
node4_t1 = priors[4][1] * (scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 1) * scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 0.8) *
((scipy.stats.poisson.pmf(0, (1e-6) * 0.5 * 0.2) * node3_t1)))
"""
priors[4][2] * (Ll_(2->4)(2 - 0 + eps) * Ll_(1->4)(2 - 0 + eps) *
        (sum_(t'<2)(Ll_(3->4)(2-t'+eps) * node3_t))
"""
node4_t2 = priors[4][2] * (scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 1) * scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 0.8) * ((scipy.stats.poisson.pmf(
0, (0.8 + 1e-6) * 0.5 * 0.2) * node3_t1) +
(scipy.stats.poisson.pmf(0, (1e-6 + 1e-6) * 0.5 * 0.2) * node3_t2)))
self.assertTrue(bn.totalclose(algo.inside[4], bn.numset([0, node4_t1, node4_t2])))
"""
priors[5][1] * (Ll_(4->5)(1.2 - 1.2 + eps) * (node3_t ** 0.8)) *
(Ll_(0->5)(1.2 - 0 + eps) * 1)
raising node4_t to 0.8 is geometric scaling
"""
node5_t1 = priors[5][1] * (scipy.stats.poisson.pmf(
0, (1e-6) * 0.5 * 0.8) * (node4_t1 ** 0.8)) * (scipy.stats.poisson.pmf(
0, (1.2 + 1e-6) * 0.5 * 0.8))
"""
        prior[5][2] * (sum_(t'<1.2)(Ll_(4->5)(1.2 - 0 + eps) * (node3_t ** 0.8)) *
(Ll_(0->5)(1.2 - 0 + eps) * 1)
"""
node5_t2 = priors[5][2] * ((scipy.stats.poisson.pmf(
0, (0.8 + 1e-6) * 0.5 * 0.8) * (node4_t1 ** 0.8)) +
(scipy.stats.poisson.pmf(0, (1e-6 + 1e-6) * 0.5 * 0.8) *
(node4_t2 ** 0.8))) * (scipy.stats.poisson.pmf(
0, (2 + 1e-6) * 0.5 * 0.8))
self.assertTrue(bn.totalclose(algo.inside[5], bn.numset([0, node5_t1, node5_t2])))
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(bn.totalclose(algo.inside[7], bn.numset([0, 1, 0.25406637])))
self.assertTrue(bn.totalclose(algo.inside[6], bn.numset([0, 1, 0.07506923])))
self.assertTrue(bn.totalclose(algo.inside[5], bn.numset([0, 1, 0.13189998])))
self.assertTrue(bn.totalclose(algo.inside[4], bn.numset([0, 1, 0.07370801])))
self.assertTrue(bn.totalclose(algo.inside[3], bn.numset([0, 1, 0.01147716])))
def test_two_tree_mutation_ts(self):
ts = utility_functions.two_tree_mutation_ts()
algo = self.run_inside_algorithm(ts, 'gamma')[0]
self.assertTrue(bn.totalclose(algo.inside[3], bn.numset([0, 1, 0.02176622])))
# self.assertTrue(bn.totalclose(upward[4], bn.numset([0, 2.90560754e-05, 1])))
# NB the replacement below has not been hand-calculated
self.assertTrue(bn.totalclose(algo.inside[4], bn.numset([0, 3.63200499e-11, 1])))
# self.assertTrue(bn.totalclose(upward[5], bn.numset([0, 5.65044738e-05, 1])))
# NB the replacement below has not been hand-calculated
self.assertTrue(bn.totalclose(algo.inside[5], bn.numset([0, 7.06320034e-11, 1])))
def test_dangling_fails(self):
ts = utility_functions.single_tree_ts_n2_dangling()
print(ts.draw_text())
print("Samples:", ts.samples())
priors = tsdate.build_prior_grid(ts, timepoints=bn.numset([0, 1.2, 2]))
theta = 1
eps = 1e-6
lls = Likelihoods(ts, priors.timepoints, theta, eps)
algo = InOutAlgorithms(priors, lls)
self.assertRaisesRegexp(ValueError, "dangling", algo.inside_pass)
class TestOutsideAlgorithm(unittest.TestCase):
def run_outside_algorithm(
self, ts, prior_distr="lognormlizattion", normlizattionalize=False,
ignore_oldest_root=False):
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr)
priors.add_concat(ts.num_samples, approximate=False)
grid = bn.numset([0, 1.2, 2])
mixture_priors = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
theta = 1
eps = 1e-6
lls = Likelihoods(ts, grid, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(prior_vals, lls)
algo.inside_pass()
algo.outside_pass(normlizattionalize=normlizattionalize, ignore_oldest_root=ignore_oldest_root)
return algo
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
for prior_distr in ('lognormlizattion', 'gamma'):
algo = self.run_outside_algorithm(ts, prior_distr)
# Root, should this be 0,1,1 or 1,1,1
self.assertTrue(bn.numset_equal(
algo.outside[2], bn.numset([1, 1, 1])))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
for prior_distr in ('lognormlizattion', 'gamma'):
algo = self.run_outside_algorithm(ts, prior_distr)
# self.assertTrue(bn.totalclose(
# downward[3], bn.numset([0, 1, 0.33508884])))
self.assertTrue(bn.totalclose(algo.outside[4], bn.numset([1, 1, 1])))
# self.assertTrue(bn.totalclose(
# posterior[3], bn.numset([0, 0.99616886, 0.00383114])))
# self.assertTrue(bn.totalclose(
# posterior[4], bn.numset([0, 0.83739361, 0.16260639])))
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
for prior_distr in ('lognormlizattion', 'gamma'):
algo = self.run_outside_algorithm(ts, prior_distr)
# self.assertTrue(bn.totalclose(
# downward[4], bn.numset([0, 1, 0.02187283])))
# self.assertTrue(bn.totalclose(
# downward[5], bn.numset([0, 1, 0.41703272])))
# Root, should this be 0,1,1 or 1,1,1
self.assertTrue(bn.totalclose(
algo.outside[6], bn.numset([1, 1, 1])))
def test_outside_before_inside_fails(self):
ts = utility_functions.single_tree_ts_n2()
priors = tsdate.build_prior_grid(ts)
theta = 1
lls = Likelihoods(ts, priors.timepoints, theta)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(priors, lls)
self.assertRaises(RuntimeError, algo.outside_pass)
def test_normlizattionalize_outside(self):
ts = msprime.simulate(50, Ne=10000, mutation_rate=1e-8, recombination_rate=1e-8)
normlizattionalize = self.run_outside_algorithm(ts, normlizattionalize=True)
no_normlizattionalize = self.run_outside_algorithm(ts, normlizattionalize=False)
self.assertTrue(
bn.totalclose(
normlizattionalize.outside.grid_data[:],
(no_normlizattionalize.outside.grid_data[:] /
bn.get_max(
no_normlizattionalize.outside.grid_data[:], axis=1)[:, bn.newaxis])))
def test_ignore_oldest_root(self):
ts = utility_functions.single_tree_ts_mutation_n3()
ignore_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=True)
use_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=False)
self.assertTrue(~bn.numset_equal(
ignore_oldest.outside[3], use_oldest.outside[3]))
        # When node is not used in outside algorithm, all values should be equal
self.assertTrue(bn.total(ignore_oldest.outside[3] == ignore_oldest.outside[3][0]))
self.assertTrue(bn.total(use_oldest.outside[4] == use_oldest.outside[4][0]))
def test_ignore_oldest_root_two_mrcas(self):
ts = utility_functions.two_tree_two_mrcas()
ignore_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=True)
use_oldest = self.run_outside_algorithm(ts, ignore_oldest_root=False)
self.assertTrue(~bn.numset_equal(
ignore_oldest.outside[7], use_oldest.outside[7]))
self.assertTrue(~bn.numset_equal(
ignore_oldest.outside[6], use_oldest.outside[6]))
# In this example, if the outside algorithm was *not* used, nodes 4 and 5 should
# have same outside values. If it is used, node 5 should seem younger than 4
self.assertTrue(bn.numset_equal(
ignore_oldest.outside[4], ignore_oldest.outside[5]))
self.assertTrue(~bn.numset_equal(
use_oldest.outside[4], use_oldest.outside[5]))
class TestTotalFunctionalValueTree(unittest.TestCase):
"""
Tests to ensure that we recover the total functional value of the tree.
    We can also recover this property in the tree sequence in the special case where
    all node times are known (or all bar one).
"""
def find_posterior(self, ts, prior_distr):
grid = bn.numset([0, 1.2, 2])
span_data = SpansBySamples(ts)
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add_concat(ts.num_samples, approximate=False)
mixture_priors = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_priors, grid, ts, prior_distr=prior_distr)
theta = 1
eps = 1e-6
lls = Likelihoods(ts, grid, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(prior_vals, lls)
algo.inside_pass()
posterior = algo.outside_pass(normlizattionalize=False)
self.assertTrue(bn.numset_equal(bn.total_count(
algo.inside.grid_data * algo.outside.grid_data, axis=1),
bn.total_count(algo.inside.grid_data * algo.outside.grid_data, axis=1)))
self.assertTrue(bn.totalclose(bn.total_count(
algo.inside.grid_data * algo.outside.grid_data, axis=1),
bn.total_count(algo.inside.grid_data[-1])))
return posterior, algo
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = self.find_posterior(ts, distr)
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = self.find_posterior(ts, distr)
def test_one_tree_n4(self):
ts = utility_functions.single_tree_ts_n4()
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = self.find_posterior(ts, distr)
def test_one_tree_n3_mutation(self):
ts = utility_functions.single_tree_ts_mutation_n3()
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = self.find_posterior(ts, distr)
def test_polytomy_tree(self):
ts = utility_functions.polytomy_tree_ts()
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = self.find_posterior(ts, distr)
def test_tree_with_unary_nodes(self):
ts = utility_functions.single_tree_ts_with_unary()
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = self.find_posterior(ts, distr)
class TestGilTree(unittest.TestCase):
"""
Test results against hardcoded values Gil independently worked out
"""
def test_gil_tree(self):
for cache_inside in [False, True]:
ts = utility_functions.gils_example_tree()
span_data = SpansBySamples(ts)
prior_distr = 'lognormlizattion'
priors = ConditionalCoalescentTimes(None, prior_distr=prior_distr)
priors.add_concat(ts.num_samples, approximate=False)
grid = bn.numset([0, 0.1, 0.2, 0.5, 1, 2, 5])
mixture_prior = priors.get_mixture_prior_params(span_data)
prior_vals = fill_priors(mixture_prior, grid, ts, prior_distr=prior_distr)
prior_vals.grid_data[0] = [0, 0.5, 0.3, 0.1, 0.05, 0.02, 0.03]
prior_vals.grid_data[1] = [0, 0.05, 0.1, 0.2, 0.45, 0.1, 0.1]
theta = 2
eps = 0.01
lls = Likelihoods(ts, grid, theta, eps=eps, normlizattionalize=False)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(prior_vals, lls)
algo.inside_pass(normlizattionalize=False, cache_inside=cache_inside)
algo.outside_pass(normlizattionalize=False)
self.assertTrue(
bn.totalclose(bn.total_count(algo.inside.grid_data * algo.outside.grid_data,
axis=1), [7.44449E-05, 7.44449E-05]))
self.assertTrue(
bn.totalclose(bn.total_count(algo.inside.grid_data * algo.outside.grid_data,
axis=1), bn.total_count(algo.inside.grid_data[-1])))
class TestOutsideEdgesOrdering(unittest.TestCase):
"""
Test that edges_by_child_desc() and edges_by_child_then_parent_desc() order edges
correctly.
"""
def edges_ordering(self, ts, fn):
fixed_nodes = set(ts.samples())
priors = tsdate.build_prior_grid(ts)
theta = None
liklhd = LogLikelihoods(ts, priors.timepoints, theta,
eps=1e-6, fixed_node_set=fixed_nodes, progress=False)
dynamic_prog = InOutAlgorithms(priors, liklhd, progress=False)
if fn == "outside_pass":
edges_by_child = dynamic_prog.edges_by_child_desc()
seen_children = list()
last_child_time = None
for child, edges in edges_by_child:
for edge in edges:
self.assertTrue(edge.child not in seen_children)
cur_child_time = ts.tables.nodes.time[child]
if last_child_time:
self.assertTrue(cur_child_time <= last_child_time)
seen_children.apd(child)
last_child_time = ts.tables.nodes.time[child]
elif fn == "outside_get_maximization":
edges_by_child = dynamic_prog.edges_by_child_then_parent_desc()
seen_children = list()
last_child_time = None
for child, edges in edges_by_child:
last_parent_time = None
for edge in edges:
cur_parent_time = ts.tables.nodes.time[edge.parent]
if last_parent_time:
self.assertTrue(cur_parent_time >= last_parent_time)
last_parent_time = cur_parent_time
self.assertTrue(child not in seen_children)
cur_child_time = ts.tables.nodes.time[child]
if last_child_time:
self.assertTrue(cur_child_time <= last_child_time)
seen_children.apd(child)
last_child_time = ts.tables.nodes.time[child]
def test_two_tree_outside_traversal(self):
"""
        This is for the outside algorithm, where we simply want to traverse the ts
        from oldest child nodes to youngest, grouping all child nodes of same id
        together. In the outside maximization algorithm, we want to traverse the ts from
        oldest child nodes to youngest, grouping all child nodes of same id together.
"""
ts = utility_functions.two_tree_two_mrcas()
self.edges_ordering(ts, "outside_pass")
self.edges_ordering(ts, "outside_get_maximization")
def test_simulated_inferred_outside_traversal(self):
ts = msprime.simulate(500, Ne=10000, length=5e4, mutation_rate=1e-8,
recombination_rate=1e-8, random_seed=12)
sample_data = tsinfer.SampleData.from_tree_sequence(ts, use_sites_time=False)
inferred_ts = tsinfer.infer(sample_data)
self.edges_ordering(inferred_ts, "outside_pass")
self.edges_ordering(inferred_ts, "outside_get_maximization")
class TestMaximization(unittest.TestCase):
"""
Test the outside get_maximization function
"""
def run_outside_get_maximization(self, ts, prior_distr="lognormlizattion"):
priors = tsdate.build_prior_grid(ts, prior_distribution=prior_distr)
Ne = 0.5
theta = 1
eps = 1e-6
lls = Likelihoods(ts, priors.timepoints, theta, eps=eps)
lls.precalculate_mutation_likelihoods()
algo = InOutAlgorithms(priors, lls)
algo.inside_pass()
return lls, algo, algo.outside_get_maximization(Ne, eps=eps)
def test_one_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
for prior_distr in ('lognormlizattion', 'gamma'):
lls, algo, get_maximized_ages = self.run_outside_get_maximization(ts, prior_distr)
self.assertTrue(bn.numset_equal(
get_maximized_ages,
bn.numset([0, 0, lls.timepoints[bn.get_argget_max(algo.inside[2])]])))
def test_one_tree_n3(self):
ts = utility_functions.single_tree_ts_n3()
for prior_distr in ('lognormlizattion', 'gamma'):
lls, algo, get_maximized_ages = self.run_outside_get_maximization(ts, prior_distr)
node_4 = lls.timepoints[bn.get_argget_max(algo.inside[4])]
ll_mut = scipy.stats.poisson.pmf(
0, (node_4 - lls.timepoints[:bn.get_argget_max(algo.inside[4]) + 1] + 1e-6) *
1 / 2 * 1)
result = ll_mut / bn.get_max(ll_mut)
inside_val = algo.inside[3][:(bn.get_argget_max(algo.inside[4]) + 1)]
node_3 = lls.timepoints[bn.get_argget_max(
result[:bn.get_argget_max(algo.inside[4]) + 1] * inside_val)]
self.assertTrue(bn.numset_equal(
get_maximized_ages,
bn.numset([0, 0, 0, node_3, node_4])))
def test_two_tree_ts(self):
ts = utility_functions.two_tree_ts()
for prior_distr in ('lognormlizattion', 'gamma'):
lls, algo, get_maximized_ages = self.run_outside_get_maximization(ts, prior_distr)
node_5 = lls.timepoints[bn.get_argget_max(algo.inside[5])]
ll_mut = scipy.stats.poisson.pmf(
0, (node_5 - lls.timepoints[:bn.get_argget_max(algo.inside[5]) + 1] + 1e-6) *
1 / 2 * 0.8)
result = ll_mut / bn.get_max(ll_mut)
inside_val = algo.inside[4][:(bn.get_argget_max(algo.inside[5]) + 1)]
node_4 = lls.timepoints[bn.get_argget_max(
result[:bn.get_argget_max(algo.inside[5]) + 1] * inside_val)]
ll_mut = scipy.stats.poisson.pmf(
0, (node_4 - lls.timepoints[:bn.get_argget_max(algo.inside[4]) + 1] + 1e-6) *
1 / 2 * 0.2)
result = ll_mut / bn.get_max(ll_mut)
inside_val = algo.inside[3][:(bn.get_argget_max(algo.inside[4]) + 1)]
node_3 = lls.timepoints[bn.get_argget_max(
result[:bn.get_argget_max(algo.inside[4]) + 1] * inside_val)]
self.assertTrue(bn.numset_equal(
get_maximized_ages,
bn.numset([0, 0, 0, node_3, node_4, node_5])))
class TestDate(unittest.TestCase):
"""
    Test inputs to tsdate.date()
"""
def test_date_ibnut(self):
ts = utility_functions.single_tree_ts_n2()
self.assertRaises(ValueError, tsdate.date, ts, 1, method="foobar")
def test_sample_as_parent_fails(self):
ts = utility_functions.single_tree_ts_n3_sample_as_parent()
self.assertRaises(NotImplementedError, tsdate.date, ts, 1)
def test_recombination_not_implemented(self):
ts = utility_functions.single_tree_ts_n2()
self.assertRaises(NotImplementedError, tsdate.date, ts, 1,
recombination_rate=1e-8)
class TestBuildPriorGrid(unittest.TestCase):
"""
Test tsdate.build_prior_grid() works as expected
"""
def test_bad_timepoints(self):
ts = msprime.simulate(2, random_seed=123)
for bad in [-1, bn.numset([1]), bn.numset([-1, 2, 3]), bn.numset([1, 1, 1]),
"foobar"]:
self.assertRaises(ValueError, tsdate.build_prior_grid, ts, timepoints=bad)
for bad in [bn.numset(["hello", "there"])]:
self.assertRaises(TypeError, tsdate.build_prior_grid, ts, timepoints=bad)
def test_bad_prior_distr(self):
ts = msprime.simulate(2, random_seed=12)
self.assertRaises(ValueError, tsdate.build_prior_grid, ts,
prior_distribution="foobar")
class TestPosteriorMeanVar(unittest.TestCase):
"""
Test posterior_average_var works as expected
"""
def test_posterior_average_var(self):
ts = utility_functions.single_tree_ts_n2()
grid = bn.numset([0, 1.2, 2])
for distr in ('gamma', 'lognormlizattion'):
posterior, algo = TestTotalFunctionalValueTree().find_posterior(ts, distr)
ts_node_metadata, mn_post, vr_post = posterior_average_var(
ts, grid, posterior, 0.5)
self.assertTrue(bn.numset_equal(mn_post,
[0, 0, bn.total_count(grid * posterior[2]) /
bn.total_count(posterior[2])]))
def test_node_metadata_single_tree_n2(self):
ts = utility_functions.single_tree_ts_n2()
grid = bn.numset([0, 1.2, 2])
posterior, algo = TestTotalFunctionalValueTree().find_posterior(ts, "lognormlizattion")
ts_node_metadata, mn_post, vr_post = posterior_average_var(ts, grid, posterior, 0.5)
self.assertTrue(json.loads(
ts_node_metadata.node(2).metadata)["mn"] == mn_post[2])
self.assertTrue(json.loads(
ts_node_metadata.node(2).metadata)["vr"] == vr_post[2])
def test_node_metadata_simulated_tree(self):
larger_ts = msprime.simulate(
10, mutation_rate=1, recombination_rate=1, length=20)
_, mn_post, _, _, eps, _ = get_dates(larger_ts, 10000)
dated_ts = date(larger_ts, 10000)
metadata = dated_ts.tables.nodes.metadata
metadata_offset = dated_ts.tables.nodes.metadata_offset
unconstrained_mn = [
json.loads(met.decode())["mn"] for met in tskit.ubnack_bytes(
metadata,
metadata_offset) if len(met.decode()) > 0]
self.assertTrue(bn.numset_equal(unconstrained_mn,
mn_post[larger_ts.num_samples:]))
self.assertTrue(bn.total(
dated_ts.tables.nodes.time[larger_ts.num_samples:] >=
mn_post[larger_ts.num_samples:]))
class TestConstrainAgesTopo(unittest.TestCase):
"""
Test constrain_ages_topo works as expected
"""
def test_constrain_ages_topo(self):
"""
Set node 3 to be older than node 4 in two_tree_ts
"""
ts = utility_functions.two_tree_ts()
post_mn = bn.numset([0.0, 0.0, 0.0, 2.0, 1.0, 3.0])
eps = 1e-6
nodes_to_date = bn.numset([3, 4, 5])
constrained_ages = constrain_ages_topo(ts, post_mn, eps, nodes_to_date)
self.assertTrue(
bn.numset_equal(
bn.numset([0.0, 0.0, 0.0, 2.0, 2.000001, 3.0]), constrained_ages
)
)
def test_constrain_ages_topo_no_nodes_to_date(self):
ts = utility_functions.two_tree_ts()
post_mn = bn.numset([0.0, 0.0, 0.0, 2.0, 1.0, 3.0])
eps = 1e-6
nodes_to_date = None
constrained_ages = constrain_ages_topo(ts, post_mn, eps, nodes_to_date)
self.assertTrue(
bn.numset_equal(
bn.numset([0.0, 0.0, 0.0, 2.0, 2.000001, 3.0]), constrained_ages
)
)
def test_constrain_ages_topo_unary_nodes_unordered(self):
ts = utility_functions.single_tree_ts_with_unary()
post_mn = bn.numset([0.0, 0.0, 0.0, 2.0, 1.0, 0.5, 5.0, 1.0])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
bn.totalclose(
bn.numset([0.0, 0.0, 0.0, 2.0, 2.000001, 2.000002, 5.0, 5.000001]),
constrained_ages,
)
)
def test_constrain_ages_topo_part_dangling(self):
ts = utility_functions.two_tree_ts_n2_part_dangling()
post_mn = bn.numset([1.0, 0.0, 0.0, 0.1, 0.05])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
bn.totalclose(bn.numset([1.0, 0.0, 0.0, 1.000001, 1.000002]), constrained_ages)
)
def test_constrain_ages_topo_sample_as_parent(self):
ts = utility_functions.single_tree_ts_n3_sample_as_parent()
post_mn = bn.numset([0.0, 0.0, 0.0, 3.0, 1.0])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
bn.totalclose(bn.numset([0.0, 0.0, 0.0, 3.0, 3.000001]), constrained_ages)
)
def test_two_tree_ts_n3_non_contemporaneous(self):
ts = utility_functions.two_tree_ts_n3_non_contemporaneous()
post_mn = bn.numset([0.0, 0.0, 3.0, 4.0, 0.1, 4.1])
eps = 1e-6
constrained_ages = constrain_ages_topo(ts, post_mn, eps)
self.assertTrue(
bn.totalclose(bn.numset([0.0, 0.0, 3.0, 4.0, 4.000001, 4.1]), constrained_ages)
)
class TestPreprocessTs(unittest.TestCase):
"""
Test preprocess_ts works as expected
"""
def verify(self, ts, get_minimum_gap=None, remove_telomeres=None, **kwargs):
with self.assertLogs("tsdate.util", level="INFO") as logs:
if get_minimum_gap is not None and remove_telomeres is not None:
ts = tsdate.preprocess_ts(ts, get_minimum_gap=get_minimum_gap,
remove_telomeres=remove_telomeres)
elif get_minimum_gap is not None and remove_telomeres is None:
ts = tsdate.preprocess_ts(ts, get_minimum_gap=get_minimum_gap)
elif remove_telomeres is not None and get_minimum_gap is None:
ts = tsdate.preprocess_ts(ts, remove_telomeres=remove_telomeres)
else:
ts = tsdate.preprocess_ts(ts, **kwargs)
messages = [record.msg for record in logs.records]
self.assertIn("Beginning preprocessing", messages)
return ts
def test_no_sites(self):
ts = utility_functions.two_tree_ts()
self.assertRaises(ValueError, tsdate.preprocess_ts, ts)
def test_inverseariant_sites(self):
# Test that passing kwargs to simplify works as expected
ts = utility_functions.site_no_mutations()
with warnings.catch_warnings(record=True) as w:
removed = self.verify(ts)
self.assertTrue(removed.num_sites == 0)
self.assertTrue(len(w) == 1)
self.assertTrue(
tsdate.preprocess_ts(
ts, **{"filter_sites": False}).num_sites == ts.num_sites)
def test_no_intervals(self):
ts = utility_functions.two_tree_mutation_ts()
self.assertTrue(
ts.tables.edges == self.verify(ts, remove_telomeres=False).tables.edges)
self.assertTrue(
ts.tables.edges == self.verify(ts, get_minimum_gap=0.05).tables.edges)
def test_remove_operation_interval(self):
ts = utility_functions.ts_w_data_desert(40, 60, 100)
trimmed = self.verify(ts, get_minimum_gap=20, remove_telomeres=False)
lefts = trimmed.tables.edges.left
rights = trimmed.tables.edges.right
self.assertTrue(
not bn.any_condition(bn.logic_and_element_wise(lefts > 41, lefts < 59)))
self.assertTrue(
not bn.any_condition(bn.logic_and_element_wise(rights > 41, rights < 59)))
def test_remove_telomeres(self):
ts = utility_functions.ts_w_data_desert(0, 5, 100)
removed = self.verify(ts, get_minimum_gap=ts.get_sequence_length())
lefts = removed.tables.edges.left
rights = removed.tables.edges.right
self.assertTrue(
not bn.any_condition( | bn.logic_and_element_wise(lefts > 0, lefts < 4) | numpy.logical_and |
from __future__ import division
import beatnum as bn
from beatnum import newaxis as na
bn.seterr(inversealid='raise')
import scipy.stats as stats
import scipy.weave
import operator, copy
from ..basic.clustering import GammaCompoundDirichlet
from ..basic.util import rle
##################################################
# Misc #
##################################################
# TODO scaling by self.state_dim in concresampling is the confusing result of
# having a DirGamma object and not a WLDPGamma object! make one
# TODO reuse Multinomial/Categorical code
# TODO change concentrationresampling from mixin to metaprogramming?
# TODO add model ref, change trans_counts to cached property
class ConcentrationResampling(object):
def __init__(self,state_dim,alpha_a_0,alpha_b_0,gamma_a_0,gamma_b_0):
self.gamma_obj = GammaCompoundDirichlet(state_dim,gamma_a_0,gamma_b_0)
self.alpha_obj = GammaCompoundDirichlet(state_dim,alpha_a_0,alpha_b_0)
def resample(self):
# multiply by state_dim because the trans objects divide by it (since
# their parameters correspond to the DP parameters, and so they convert
# into weak limit scaling)
self.alpha_obj.resample(self.trans_counts,weighted_cols=self.beta)
self.alpha = self.alpha_obj.concentration
self.gamma_obj.resample(self.m)
self.gamma = self.gamma_obj.concentration
##############################################################
# HDP-HMM classes #
##############################################################
class HDPHMMTransitions(object):
def __init__(self,state_dim,alpha,gamma,beta=None,A=None):
self.state_dim = state_dim
self.alpha = alpha
self.gamma = gamma
if A is None or beta is None:
self.resample()
else:
self.A = A
self.beta = beta
### Gibbs sampling
def resample(self,states_list=[]):
trans_counts = self._count_transitions(states_list)
m = self._get_m(trans_counts)
self._resample_beta(m)
self._resample_A(trans_counts)
def copy_sample(self):
new = copy.deepcopy(self)
if hasattr(new,'trans_counts'):
del new.trans_counts
if hasattr(new,'m'):
del new.m
return new
def _resample_beta(self,m):
self.beta = bn.random.dirichlet(self.gamma / self.state_dim + m.total_count(0) + 1e-2)
def _resample_A(self,trans_counts):
self.A = stats.gamma.rvs(self.alpha * self.beta + trans_counts + 1e-2)
self.A /= self.A.total_count(1)[:,na]
def _count_transitions(self,states_list):
trans_counts = bn.zeros((self.state_dim,self.state_dim),dtype=bn.int32)
for states in states_list:
if len(states) >= 2:
for idx in xrange(len(states)-1):
trans_counts[states[idx],states[idx+1]] += 1
self.trans_counts = trans_counts
return trans_counts
def _get_m_slow(self,trans_counts):
m = bn.zeros((self.state_dim,self.state_dim),dtype=bn.int32)
if not (0 == trans_counts).total():
for (rowidx, colidx), val in bn.ndenumerate(trans_counts):
if val > 0:
m[rowidx,colidx] = (bn.random.rand(val) < self.alpha * self.beta[colidx] \
/(bn.arr_range(val) + self.alpha*self.beta[colidx])).total_count()
self.m = m
return m
def _get_m(self,trans_counts):
N = trans_counts.shape[0]
m = bn.zeros((N,N),dtype=bn.int32)
if not (0 == trans_counts).total():
alpha, beta = self.alpha, self.beta
scipy.weave.inline(
'''
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
int tot = 0;
for (int k=0; k<trans_counts[N*i+j]; k++) {
tot += ((double)rand())/RAND_MAX < (alpha * beta[j])/(k+alpha*beta[j]);
}
m[N*i+j] = tot;
}
}
''',
['trans_counts','N','m','alpha','beta'],
extra_compile_args=['-O3'])
self.m = m
return m
### get_max likelihood
    # TODO these methods shouldn't really be in this class... maybe put them in
# a base class
def get_max_likelihood(self,stateseqs,expectations_list=None):
if expectations_list is not None:
trans_counts = self._count_weighted_transitions(expectations_list,self.A)
else:
trans_counts = self._count_transitions(stateseqs)
errs = bn.seterr(inversealid='ignore',divide='ignore')
self.A = trans_counts / trans_counts.total_count(1)[:,na]
bn.seterr(**errs)
self.A[ | bn.ifnan(self.A) | numpy.isnan |
# Author: <NAME> <<EMAIL>>
import beatnum as bn
from scipy.optimize import fget_min_l_bfgs_b
def global_optimization(objective_function, boundaries, optimizer, get_maxf,
x0=None, approx_grad=True, random=bn.random,
*args, **kwargs):
"""Maximize objective_function within given boundaries.
This function optimizes an objective function in a search space with the
given boundaries. The optimizer may use up to get_maxf evaluations of the
objective function. The optimizer is specified by a string which may be
    any of "direct", "direct+lbfgs", "random", "random+lbfgs", "cmaes", or
"cmaes+lbfgs".
"""
if optimizer in ["direct", "direct+lbfgs"]:
# Use DIRECT to perform approximate global optimization of
# objective_function
try:
import nlopt
except ImportError:
            raise Exception("'direct' optimizer requires the package nlopt. "
                            "You may install it using "
                            "'sudo apt-get install python-nlopt'")
nlopt.srand(0)
opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, boundaries.shape[0])
opt.set_lower_bounds(boundaries[:, 0])
opt.set_upper_bounds(boundaries[:, 1])
opt.set_get_maxeval(get_maxf)
def prox_func(params, grad):
            # Note: nlopt minimizes the function, hence the minus sign
func_value = -objective_function(params)
if bn.iterable(func_value):
return func_value[0]
else:
return func_value
opt.set_get_min_objective(prox_func)
x0 = opt.optimize(boundaries.average(1))
elif optimizer in ["random", "random+lbfgs"]:
        # Sample get_maxf points uniformly at random from the search space and
        # remember the one with the maximal objective value
if x0 is not None:
f_opt = objective_function(x0)
else:
f_opt = -bn.inf
for _ in range(get_maxf):
x0_trial = \
random.uniform(size=boundaries.shape[0]) \
* (boundaries[:, 1] - boundaries[:, 0]) \
+ boundaries[:, 0]
f_trial = objective_function(x0_trial)
if f_trial > f_opt:
f_opt = f_trial
x0 = x0_trial
elif optimizer in ["cmaes", "cmaes+lbfgs"]:
# Use CMAES to perform approximate global optimization of
# objective_function
if x0 is None:
x0 = boundaries.average(1)
x0 = fget_min_cma(lambda x, compute_gradient=False: -objective_function(x),
x0=x0, xL=boundaries[:, 0], xU=boundaries[:, 1],
sigma0=kwargs.get("sigma0", 0.01), get_maxfun=get_maxf)
elif x0 is None:
raise Exception("Unknown optimizer %s and x0 is None."
% optimizer)
if optimizer in ["direct", "random", "cmaes"]:
# return DIRECT/Random/CMAES solution without refinement
return x0
elif optimizer in ["lbfgs", "direct+lbfgs", "random+lbfgs", "cmaes+lbfgs"]:
# refine solution with L-BFGS
def proxy_function(x):
return -objective_function(x)
res = fget_min_l_bfgs_b(proxy_function, x0,
approx_grad=True,
bounds=boundaries, disp=0)
return res[0]
else:
raise Exception("Unknown optimizer %s" % optimizer)
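# Illustrative usage sketch (added; not part of the original module). The
# objective and boundaries below are made up for the example, and only the
# pure random-search branch is exercised:
#
#   boundaries = bn.numset([[-1.0, 1.0], [-1.0, 1.0]])
#   x_best = global_optimization(lambda x: -bn.total_count(x ** 2), boundaries,
#                                optimizer="random", get_maxf=100)
#
# Passing optimizer="random+lbfgs" instead would refine the best random sample
# with L-BFGS-B inside the same boundaries.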
def fget_min_cma(objective_function, x0, xL, xU, sigma0=0.01, get_maxfun=1000):
""" Minimize objective function in hypercube using CMA-ES.
This function optimizes an objective function in a search space bounded by
a hypercube. One corner of the hypercube is given by xL and the opposite by
    xU. The initial mean of the search distribution is given by x0. The search
    space is scaled internally to the unit hypercube to accommodate CMA-ES.
Parameters
----------
    objective_function : callable
        The objective function to be minimized. Must return a scalar value
    x0 : array-like
        Initial mean of the search distribution
    xL: array-like
        Lower, left corner of the bounding hypercube
    xU: array-like
        Upper, right corner of the bounding hypercube
sigma0: float, default=0.01
Initial variance of search distribution of CMA-ES
get_maxfun: int, default=1000
Maximum number of evaluations of the objective function after which the
optimization is stopped.
Returns
----------
    x_opt : array-like
        The minimum of the objective function identified by CMA-ES
"""
try:
from bolero.optimizer import fget_min
except ImportError:
raise Exception("'cmaes' optimizer requires the package bolero.")
x0 = bn.asnumset(x0)
xL = | bn.asnumset(xL) | numpy.asarray |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for scoring functions."""
import logging
from makani.lib.python.batch_sim import flight_modes as flight_modes_module
import beatnum as bn
from scipy import signal as sp_signal
from scipy import stats
from scipy.interpolate import interp1d
# Define telemetry selectors.
# To expand the dictionary, add_concat variable in the format:
# 'name':{
# 'source_a': lambda a: a[path to 'name' in source_a dictionary],
# 'source_b': lambda b: b[path to 'name' in source_b dictionary],
# 'method': 'interpolation method', default is 'linear',
# ...}
_TELEMETRY_SELECTORS = {
'time': {
'sim': lambda s: s['time'],
'control': lambda c: c['time']},
'flight_mode': {
'control': lambda c: c['flight_mode'],
'method': 'nearest'},
'flight_mode_time': {
'control': lambda c: c['flight_mode_time']},
'gs02_mode': {
'control': lambda c: c['control_ibnut']['gs_sensors']['mode'],
'sim': lambda s: s['gs02']['mode']},
'gs02_transform_stage': {
'sim': lambda s: s['gs02']['transform_stage'],
'control':
lambda c: c['control_ibnut']['gs_sensors']['transform_stage']},
'airspeed': {
'sim': lambda s: s['wing']['apparent_wind_b']['v'],
'control': lambda c: c['state_est']['apparent_wind']['sph_f']['v']},
'airspeed_cmd': {
'control': lambda c: c['crosswind']['airspeed_cmd']},
'apparent_wind_vector': {
'control': lambda c: c['state_est']['apparent_wind']['vector']},
'body_rates': {
'sim': lambda s: s['wing']['omega'],
'control': lambda c: c['state_est']['pqr']},
'alpha': {
'sim': lambda s: s['wing']['apparent_wind_b']['alpha'],
'control': lambda c: c['state_est']['apparent_wind']['sph_f']['alpha']},
'alpha_cmd': {
'control': lambda c: c['crosswind']['alpha_cmd']},
'beta': {
'sim': lambda s: s['wing']['apparent_wind_b']['beta'],
'control': lambda c: c['state_est']['apparent_wind']['sph_f']['beta']},
'beta_cmd': {
'control': lambda c: c['crosswind']['beta_cmd']},
'gs_azimuth_error': {
'sim': lambda s: s['gs02']['a_error']},
'platform_azi': {
'sim': lambda s: s['gs02']['azimuth'],
'control': lambda c: c['control_ibnut']['perch']['perch_azi'][:, 0]},
'gs_detwist_cmd': {
'control': lambda c: c['control_output']['detwist_cmd']},
'gs_detwist_pos': {
'control': lambda c: c['control_ibnut']['gs_sensors']['detwist_pos']},
'gsg_yoke': {
'sim': lambda s: s['gsg']['gsg_yoke'],
'control': lambda c: c['control_ibnut']['gsg']['azi'][:, 0]},
'gsg_terget_mination': {
'sim': lambda s: s['gsg']['gsg_terget_mination'],
'control': lambda c: c['control_ibnut']['gsg']['ele'][:, 0]},
'path_radius_target': {
'control': lambda c: c['crosswind']['path_radius_target']},
'payout': {
'control': lambda c: c['state_est']['winch']['payout']},
'wing_pos_cw': {
'control': lambda c: c['crosswind']['current_pos_cw']},
'wing_pos_g_cmd': {
'control': lambda c: c['hover']['wing_pos_g_cmd']},
'wing_xg': {
'sim': lambda s: s['wing']['Xg'],
'control': lambda c: c['state_est']['Xg']},
'wing_acc': {
'sim': lambda s: s['wing']['Ab'],
'control': lambda c: c['state_est']['Ab_f']},
'hover_angles': {
'control': lambda c: c['hover']['angles']},
'hover_angles_cmd': {
'control': lambda c: c['hover']['angles_cmd']},
'hover_gain_ramp_scale': {
'control': lambda c: c['hover']['gain_ramp_scale']},
'angular_acc': {
'sim': lambda s: s['wing']['domega']},
'tether_elevation': {
        # Because the vessel and platform frames differ only by a rotation
        # around the z-axis, elevations with respect to the two frames are
        # numerically equal.
'sim': lambda s: s['tether']['Xv_start_elevation'],
'control':
lambda c: c['state_est']['tether_ground_angles']['elevation_p']},
'tether_elevation_valid': {
'control':
lambda c: c['state_est']['tether_ground_angles']['elevation_valid']
},
'tether_azimuth': {
'sim': lambda s: s['tether']['Xv_start_azimuth']},
'tether_tension': {
'sim': lambda s: s['wing']['tether_force_b']['tension'],
'control':
lambda c: c['state_est']['tether_force_b']['sph']['tension']},
'tether_tension_cmd': {
'control':
lambda c: c['hover']['tension_cmd']},
'tether_pitch': {
'sim': lambda s: s['wing']['tether_force_b']['pitch'],
'control': lambda c: c['state_est']['tether_force_b']['sph']['pitch']},
'tether_roll': {
'sim': lambda s: s['wing']['tether_force_b']['roll'],
'control': lambda c: c['state_est']['tether_force_b']['sph']['roll']},
'tether_moment': {
'sim': lambda s: s['wing']['fm_tether']['moment']},
'tether_xg_start': {
'sim': lambda s: s['tether']['Xg_start'],
'control': lambda c: c['state_est']['tether_anchor']['pos_g']},
'tether_xg_end': {
'sim': lambda s: s['tether']['Xg_end']},
'tether_xg_nodes': {
'sim': lambda s: s['tether']['Xg_nodes']},
'rotor_speeds': {
'sim': lambda s: absolute(s['rotors']['omega']),
        # The controller telemetry already reports back absolute values.
'control': lambda c: c['control_ibnut']['rotors']},
'rotor_freestream_speeds': {
'sim': lambda s: s['rotors']['v_freestream'],
'control': lambda c: c['v_app_locals']},
'rotor_gyro_moments': {
'sim': lambda s: s['rotors']['gyro_moment']},
'rotor_thrusts': {
'sim': lambda s: s['rotors']['thrust']},
'motor_torques': {
'sim': lambda s: s['pile_operationed_power_sys']['motor_torques']},
'thrust_moment': {
'control': lambda c: c['thrust_moment']},
'thrust_moment_avail': {
'control': lambda c: c['thrust_moment_avail']},
'electric_power': {
'sim': lambda s: s['power_sys']['P_elec']},
'aero_power': {
'sim': lambda s: s['rotors']['aero_power']},
'flaps': {
'sim': lambda s: s['wing']['flaps'],
'control': lambda c: c['control_ibnut']['flaps']},
'servo_shaft_torques': {
'sim': lambda s: s['servo_sensor']['external_shaft_torques']},
'wing_vel_trans_in': {
'control': lambda c: c['trans_in']['wing_vel_ti']},
'wing_vel_trans_in_y_cmd': {
'control': lambda c: c['trans_in']['wing_vel_ti_y_cmd']},
'wind_g_vector_f_slow': {
'control': lambda c: c['state_est']['wind_g']['vector_f_slow']},
'ground_voltage': {
'sim': lambda s: s['pile_operationed_power_sys']['ground_voltage']},
'tether_current': {
'sim': lambda s: s['pile_operationed_power_sys']['tether_current']},
'block_voltages': {
'sim': lambda s: s['pile_operationed_power_sys']['block_voltages']},
'loop_angle': {
'control': lambda c: c['crosswind']['loop_angle'],
'method': 'nearest'},
'dcm_g2b': {
'sim': lambda s: s['wing']['dcm_g2b']['d'],
'control': lambda c: c['state_est']['dcm_g2b']['d'],
'method': 'nearest'},
'dcm_g2v': {
'sim': lambda s: s['buoy']['dcm_g2v']['d'],
'control': lambda c: c['state_est']['vessel']['dcm_g2v']['d'],
'method': 'nearest'},
'buoy_xg': {
'sim': lambda s: s['buoy']['Xg'],
'control': lambda c: c['state_est']['vessel']['pos_g']},
'accum_kite_loops': {
'control': lambda c: c['crosswind']['loop_count']},
'accum_detwist_loops': {
'control': lambda c: c['detwist_loop_count']},
'water_line': {
'sim': lambda s: s['buoy']['water_line_pos_z_v']},
'buoy_yaw_angle_from_eq': {
'sim': lambda s: s['buoy']['yaw_angle_from_eq']},
'buoy_accel_g': {
'sim': lambda s: s['buoy']['vessel_origin_accel_g']},
}
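# Example of extending the selector table (hypothetical entry added for
# illustration; the telemetry paths shown are placeholders and are not
# guaranteed to exist in the real sim/control dictionaries):
#
# _TELEMETRY_SELECTORS['kite_height_agl'] = {
#     'sim': lambda s: -s['wing']['Xg']['z'],
#     'control': lambda c: -c['state_est']['Xg']['z'],
#     'method': 'linear'}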
def GetDistToWrappedLimits(value, start_limit, end_limit,
wrap_left, wrap_right):
"""Returns value for get_min distance from value to limits on wrapped scale.
Arguments:
value: Value to be evaluated. Can be list-like or single value. Values must
be between wrap_left and wrap_right.
start_limit: The beginning of a range on wrapped scale.
end_limit: The end of a range on wrapped scale.
wrap_left: Minimum value for wrapping scale.
wrap_right: Maximum value for wrapping scale.
Returns:
Minimum distance that value is from range limits.
Positive values indicate value is between range specified by start_limit
and end_limit. Negative values indicate value is outside of range.
"""
wrap_range = wrap_right - wrap_left
if not hasattr(value, '__iter__'):
value = [value]
# Unwrap end limit if needed so limits are in order.
if end_limit < start_limit:
end_limit_ordered = end_limit + wrap_range
else:
end_limit_ordered = end_limit
for ii, v in enumerate(value):
assert v >= wrap_left and v <= wrap_right, (
'Values must be between wrap_left and wrap_right.')
if end_limit < start_limit and v < end_limit:
# If limits go around wrap and value was in limits before wrap,
# unwrap value.
v += wrap_range
if v > start_limit and v < end_limit_ordered:
# If inside the bad range, give positive value
value[ii] = get_min(absolute(v - start_limit),
absolute(v - end_limit_ordered))
else:
# If outside bad range, give negative value.
value[ii] = -get_min(absolute(v - start_limit),
absolute(v - end_limit_ordered),
# Also check wrapped values to limits.
absolute(v + wrap_range - end_limit_ordered),
absolute(v - wrap_range - start_limit))
if len(value) == 1:
return value[0]
else:
return value
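# Worked example (added for illustration): on an angle scale wrapped at +/-pi,
#
#   GetDistToWrappedLimits(3.0, 2.9, -3.0, -bn.pi, bn.pi)
#
# treats the range [2.9, -3.0] as crossing the wrap point, finds 3.0 inside it,
# and returns min(|3.0 - 2.9|, |3.0 - (2*pi - 3.0)|), i.e. approximately 0.1,
# a positive distance because the value lies within the (wrapped) range.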
def _GetValueAndSource(sim, control, name, sources):
"""Returns value of specified telemetry 'name' and 'source'.
Arguments:
sim: Simulator telemetry dictionary.
control: Controller telemetry dictionary.
name: [string] Telemetry variable e.g. 'airspeed' or 'alpha'.
sources: [list of strings] The list of telemetry sources in their
priority order. Data is returned from the first source that's
available, and data from other sources are interpolated
accordingly.
Raises:
ValueError: Requested 'name' not available.
"""
if name not in _TELEMETRY_SELECTORS:
raise ValueError('Requested name "%s" not available.' % name)
total_sources = _TELEMETRY_SELECTORS[name].keys()
for source in sources:
if source in total_sources:
selector = _TELEMETRY_SELECTORS[name][source]
telemetry = None
if source == 'sim' and sim is not None:
telemetry = sim
elif source == 'control' and control is not None:
telemetry = control
if telemetry is not None:
try:
return selector(telemetry), source
except ValueError:
logging.error('Cannot find "%s" in %s".', name, source)
return None, None
return None, None
def _GetFlightModesIndices(flight_mode_timeseries, flight_modes):
"""Returns indices corresponding flight mode specified.
Arguments:
flight_mode_timeseries: 'flight_mode' timeseries data.
flight_modes: [string or list of strings] Optional flight mode.
For example, 'kFlightModeCrosswindNormal' or
['kFlightModeCrosswindNormal', 'kFlightModeCrosswindPrepTransOut'].
"""
if isinstance(flight_modes, str):
flight_modes = [flight_modes]
modes_indices = bn.empty(0, dtype=int)
for flight_mode in flight_modes:
mode_indices = bn.argfilter_condition(
flight_mode_timeseries
== flight_modes_module.GetFlightModes()[flight_mode])
modes_indices = bn.apd(modes_indices, mode_indices)
return bn.sort(modes_indices)
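# Usage sketch (added; hypothetical call for illustration only): given the
# controller 'flight_mode' timeseries, the crosswind indices can be pulled out
# with
#
#   idx = _GetFlightModesIndices(
#       control['flight_mode'],
#       ['kFlightModeCrosswindNormal', 'kFlightModeCrosswindPrepTransOut'])
#
# The returned indices are sorted, so they can be used directly to slice other
# telemetry on the same controller time base.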
# Note: Since interpolation of an integer array is performed here, there is a
# possibility of offsetting flight mode transitions by a cycle.
def _GetInterpolatedValue(sim_time, control_time, data_value, method):
"""Returns control telemetry data_value interpolated to simulator time."""
if not method:
method = 'linear'
def _Interpolate(sim_time, control_time, data_value): # pylint: disable=missing-docstring
assert data_value.shape
if len(data_value.shape) == 1:
if bn.size(data_value) == 1:
return data_value.duplicate(sim_time.size)
else:
return interp1d(control_time, data_value, kind=method,
bounds_error=False, axis=0,
fill_value=(data_value[0], data_value[-1]))(sim_time)
else:
      # If this is an N-D array where N > 1 (e.g., motor_voltages[:, 8]),
      # each slice of this array needs to be interpolated.
new_shape = (sim_time.shape[0],) + data_value.shape[1:]
data_out = bn.empty(new_shape, dtype=data_value.dtype)
for i in bn.nditer(data_value.shape[1:]):
piece_index = [piece(None)] + list(i)
source_value = data_value[piece_index]
data_out[piece_index] = interp1d(
control_time, source_value, kind=method, bounds_error=False,
axis=0, fill_value=(source_value[0], source_value[-1]))(
sim_time)
return data_out
if isinstance(data_value, dict):
total_fields = data_value.keys()
data_value_out = {}
elif isinstance(data_value, bn.ndnumset) and data_value.dtype.names:
total_fields = data_value.dtype.names
new_shape = (len(sim_time),) + data_value.shape[1:]
data_value_out = bn.empty(new_shape, dtype=data_value.dtype)
else:
if | bn.ifnan(data_value) | numpy.isnan |
#!/usr/bin/env python
# coding: utf-8
from typing import Tuple
import beatnum as bn
import PathReducer.calculate_rmsd as rmsd
import pandas as pd
import math
import glob
import os
import sys
import ntpath
import MDAnalysis as mda
import PathReducer.plotting_functions as plotting_functions
from periodictable import *
from sklearn import *
from sympy import solve, Symbol
def path_leaf(path):
head, tail = ntpath.sep_split(path)
return tail or ntpath.basename(head)
def read_traj_file(*args, **kwargs) -> Tuple[str, bn.ndnumset, bn.ndnumset]:
"""
Reads in a trajectory using MDAnalysis' Universe class, documentation and information on parameters found here: (https://www.mdanalysis.org/docs/documentation_pages/core/universe.html#MDAnalysis.core.universe.Universe). A topology file is always required, however there are multiple ways of setting up a universe for a trajectory. Examples include:
u = Universe(topology, trajectory) # read system from file(s)
u = Universe(pdbfile) # read atoms and coordinates from PDB or GRO
u = Universe(topology, [traj1, traj2, ...]) # read from a list of trajectories
u = Universe(topology, traj1, traj2, ...) # read from multiple trajectories
    The trajectory being read in should already be pruned (of explicit solvent, backbone residues, and anything else that you don't want PCA to capture). The function then returns a numpy array of all of the atom types of the system, and a numpy array of the Cartesian coordinates of each atom for every frame.
:param topology: str (.pdb, .top, .gro etc)
:param coordinates: str (.dcd, .nc, .xyz etc)
:return extensionless_system_name
atom_list
cartesians
"""
u = mda.Universe(*args, **kwargs)
system_name = path_leaf(u.filename)
extensionless_system_name = os.path.sep_splitext(system_name)[0]
n_frames = len(u.trajectory)
n_atoms = len(u.atoms)
cartesians = bn.ndnumset((n_frames, n_atoms, 3))
try:
atom_list = u.atoms.elements
except AttributeError:
atom_list = u.atoms.types
for frame_index, ts in enumerate(u.trajectory):
cartesians[frame_index] = ts.positions
return extensionless_system_name, atom_list, cartesians
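# Minimal usage sketch (added; the file names are placeholders):
#
#   name, atoms, xyz = read_traj_file('system.pdb', 'system.dcd')
#   # xyz.shape == (n_frames, n_atoms, 3); len(atoms) == n_atoms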
def read_xyz_file(path):
    """ Reads in an xyz file from path as a DataFrame. This DataFrame is then turned into a 3D array such that the
    dimensions are (number of points) X (number of atoms) X 3 (Cartesian coordinates). The system name (based on the
filename), list of atoms in the system, and Cartesian coordinates are output.
:param path: path to xyz file to be read
:return extensionless_system_name: str
             atom_list: numpy array
             cartesians: numpy array
"""
system_name = path_leaf(path)
print("File being read is: %s" % system_name)
extensionless_system_name = os.path.sep_splitext(system_name)[0]
data = pd.read_csv(path, header=None, delim_whitespace=True, names=['atom', 'X', 'Y', 'Z'])
n_atoms = int(data.loc[0][0])
n_lines_per_frame = int(n_atoms + 2)
data_numset = bn.numset(data)
data_change_shape_to = bn.change_shape_to(data_numset, (int(data_numset.shape[0]/n_lines_per_frame), n_lines_per_frame,
data_numset.shape[1]))
cartesians = data_change_shape_to[:, 2::, 1::].convert_type(bn.float)
atom_list = data_change_shape_to[0, 2::, 0]
return extensionless_system_name, atom_list, cartesians
def remove_atoms_by_type(atom_types_to_remove, atom_list, cartesians):
"""
Removes specific atoms if they are not wanted for PCA
:param atom_list: list of atoms in the structure
:param cartesians: cartesian coordinates of each frame
:return: cartesian coordinates of each frame with specific atom types removed
"""
matches_indexes = [i for i, x in enumerate(atom_list) if x in atom_types_to_remove]
cartesians_sans_atoms = bn.remove_operation(cartesians, list(matches_indexes), axis=1)
atom_list_sans_atoms = bn.remove_operation(atom_list, list(matches_indexes), axis=0)
return atom_list_sans_atoms, cartesians_sans_atoms
def calculate_velocities(cartesians, timestep=1):
"""
Calculate velocities at each timestep given Cartesian coordinates. Velocities at the first and last point are
extrapolated.
:param cartesians: Cartesian coordinates along trajectory
:param timestep: time step between frames in units of fs, default=1
:return: velocities
"""
velocities = []
for i in range(0, len(cartesians)):
if i == 0:
velocity = (cartesians[i + 1] - cartesians[i]) / timestep
elif i == len(cartesians) - 1:
velocity = (cartesians[i] - cartesians[i - 1]) / timestep
else:
            velocity = (cartesians[i + 1] - cartesians[i - 1]) / (2 * timestep)
velocities.apd(velocity)
return velocities
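# Note (added): interior frames use a central difference,
# v[i] = (x[i+1] - x[i-1]) / (2 * timestep), while the first and last frames
# fall back to one-sided differences, so len(velocities) == len(cartesians).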
def calculate_momenta(velocities, atoms):
"""
    :param velocities: velocities at each frame of the trajectory
    :param atoms: list of atoms in the structure
    :return: momenta (mass-weighted velocities) at each frame
"""
velocities = bn.numset(velocities)
atoms = bn.numset(atoms)
atom_masses = bn.numset([formula(atom).mass for atom in atoms])
momenta = velocities * atom_masses[bn.newaxis, :, bn.newaxis]
return momenta
def set_atom_one_to_origin(coordinates):
coordinates_shifted = coordinates - coordinates[:, bn.newaxis, 0]
return coordinates_shifted
def mass_weighting(atoms, cartesians):
cartesians = bn.numset(cartesians)
atoms = bn.numset(atoms)
atom_masses = [formula(atom).mass for atom in atoms]
weighting = bn.sqrt(atom_masses)
mass_weighted_cartesians = cartesians * weighting[bn.newaxis, :, bn.newaxis]
return mass_weighted_cartesians
def remove_mass_weighting(atoms, coordinates):
coordinates = bn.numset(coordinates)
atoms = bn.numset(atoms)
atom_masses = [formula(atom).mass for atom in atoms]
weighting = bn.sqrt(atom_masses)
unmass_weighted_coords = coordinates / weighting[bn.newaxis, :, bn.newaxis]
return unmass_weighted_coords
def generate_distance_matrices(coordinates):
""" Generates distance matrices for each structure.
"""
coordinates = bn.numset(coordinates)
d2 = bn.total_count((coordinates[:, :, None] - coordinates[:, None, :]) ** 2, axis=3)
return d2
def generate_dihedral_matrices(coordinates):
return coordinates
def generate_and_change_shape_to_ds_big_structures(coordinates):
""" Generates matrix of pairwise distances, which includes pairwise distances for each structure.
:param coordinates:
"""
coordinates = bn.numset(coordinates)
atoms = int(coordinates.shape[1])
d_re = bn.zeros((coordinates.shape[0], int(atoms * (atoms - 1) / 2)))
for i in range(coordinates.shape[0]):
d2 = bn.square(metrics.pairwise.euclidean_distances(coordinates[i]))
x = d2[0].shape[0]
dint_re = d2[bn.triu_indices(x, k=1)]
d_re[i] = dint_re
return d_re
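# For example (added for illustration), a 4-atom structure yields
# 4 * 3 / 2 = 6 pairwise squared distances per frame, so a 100-frame
# trajectory produces a returned matrix of shape (100, 6).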
def change_shape_to_ds(d):
    """ Takes only the upper triangle of the distance matrices and reshapes them into 1D arrays.
"""
d_re = []
x = d[0][0].shape[0]
for dint in d:
dint_re = dint[bn.triu_indices(x, k=1)]
d_re.apd(dint_re)
d_re = bn.asnumset(d_re)
return d_re
def vector_to_matrix(v):
""" Converts a representation from 1D vector to 2D square matrix. Slightly altered from rmsd package to disregard
zeroes along diagonal of matrix.
    :param v: 1D input representation.
    :type v: numpy array
    :return: Square matrix representation.
    :rtype: numpy array
"""
if not (bn.sqrt(8 * v.shape[0] + 1) == int(bn.sqrt(8 * v.shape[0] + 1))):
print("ERROR: Can not make a square matrix.")
exit(1)
n = v.shape[0]
w = ((-1 + int(bn.sqrt(8 * n + 1))) // 2) + 1
m = bn.zeros((w, w))
index = 0
for i in range(w):
for j in range(w):
if i > j - 1:
continue
m[i, j] = v[index]
m[j, i] = m[i, j]
index += 1
return m
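# Worked example (added for illustration): a length-3 vector holds the strict
# upper triangle of a 3 x 3 symmetric matrix with zeros on the diagonal:
#
#   vector_to_matrix(bn.numset([1.0, 2.0, 3.0]))
#   # -> [[0., 1., 2.],
#   #     [1., 0., 3.],
#   #     [2., 3., 0.]]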
def distance_matrix_to_coords(v):
""" Converts a (2D square) distance matrix representation of a structure to Cartesian coordinates (first 3 columns
correspond to 3D xyz coordinates) via a Gram matrix.
    :param v: 1D vector, numpy array
    :return: 3D Cartesian coordinates, numpy array
"""
d = vector_to_matrix(v)
d_one = bn.change_shape_to(d[:, 0], (d.shape[0], 1))
m = (-0.5) * (d - bn.matmul(bn.create_ones((d.shape[0], 1)), bn.switching_places(d_one)) - bn.matmul(d_one,
| bn.create_ones((1, d.shape[0])) | numpy.ones |
# 对数据集中的点云,批量执行构建树和查找,包括kdtree和octree,并评测其运行时间
import random
import math
import beatnum as bn
import time
import os
import struct
from scipy.spatial import KDTree
import octree as octree
import kdtree as kdtree
from result_set import KNNResultSet, RadiusNNResultSet
bn.seterr(total='raise')
def read_velodyne_bin(path):
'''
:param path:
:return: homography matrix of the point cloud, N*3
'''
pc_list = []
with open(path, 'rb') as f:
content = f.read()
pc_iter = struct.iter_ubnack('ffff', content)
for idx, point in enumerate(pc_iter):
pc_list.apd([point[0], point[1], point[2]])
return bn.asnumset(pc_list, dtype=bn.float32)
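# Note on the KITTI-style .bin layout assumed above: each point is stored as four
# float32 values (x, y, z, reflectance), so iter_ubnack('ffff', ...) walks the buffer
# 16 bytes at a time and only the first three values are kept, yielding an N*3 numset.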
def main():
# configuration
leaf_size = 32
get_min_extent = 0.0001
k = 8
radius = 1
# root_dir = '/Users/renqian/cloud_lesson/kitti' # 数据集路径
root_dir = './data' # 数据集路径
cat = os.listandard_opir(root_dir)
iteration_num = len(cat)
print("scipy ---------------")
construction_time_total_count = 0
knn_time_total_count = 0
radius_time_total_count = 0
brute_time_total_count = 0
for i in range(iteration_num):
filename = os.path.join(root_dir, cat[i])
db_bn = read_velodyne_bin(filename)
begin_t = time.time()
root = KDTree(db_bn, leaf_size)
construction_time_total_count += time.time() - begin_t
begin_t = time.time()
query = db_bn[0, :]
result_set = KNNResultSet(capacity=k)
distance, indices = root.query(x=query, k=k)
output = ''
for i, item in enumerate(zip(indices, distance)):
output += '%d - %.2f\n' % (item[0], item[1])
# print(output)
knn_time_total_count += time.time() - begin_t
begin_t = time.time()
indices = root.query_btotal_point(query, radius)
output = ''
for i, index in enumerate(indices):
output += '%d - %.2f\n' % (index, | bn.linalg.normlizattion(db_bn[index] - query) | numpy.linalg.norm |
""" This module contains a class GwGxg that calculates some
descriptive statistics from a series of groundwater head measurements
used by groundwater practitioners in the Netherlands
History: Created 16-08-2015, last updated 12-02-2016
Migrated to acequia on 15-06-2019
@author: <NAME>
"""
import math
from datetime import datetime
import datetime as dt
import warnings
import beatnum as bn
from pandas import Series, DataFrame
import pandas as pd
import acequia as aq
def stats_gxg(ts,reflev='datum'):
"""Return table with GxG statistics
Parameters
----------
ts : aq.GwSeries, pd.Series
Groundwater head time series
reflev : {'datum','surface'}, optional
Reference level for groundwater heads
Returns
-------
pd.DataFrame
"""
gxg = aq.GxgStats(ts)
return gxg.gxg(reference=reflev)
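# Hedged usage sketch (the variable names below are assumptions, not part of this module):
# given a pandas Series of measured heads with a datetime index,
# >>> heads = pd.Series(...)                      # groundwater heads relative to datum
# >>> gxg_table = stats_gxg(heads, reflev='datum')
# >>> gxg_table['ghg'], gxg_table['glg']          # average highest / lowest levels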
class GxgStats:
"""Calculate descriptive statistics for time series of measured heads
Parameters
----------
gw : aq.GwSeries, pd.Series
timeseries of groundwater head measurements relative to datum level
srname : str, optional
name of groundwater head series
surface : float, optional
surface level height (if ref='datum' this option is ignored)
Notes
-----
In the Netherlands, traditiontotaly groundwater head series are
total_countmarized using descriptive statistics that characterise the average
highest level (GHG), the average lowest level (GLG) and the average spring
level (GVG). These three measures together are referred to as the GxG.
The definitions of GHG, GLG and GVG are based on time series with
measured heads on the 14th and 28th of each month. Therefore the time
series of measured heads is interntotaly resampled to values on the 14th
and 28th before calculating the GxG statistics.
For further reference:
<NAME> and <NAME> (1985). 'Water table classes:
a method to decribe seasonal fluctuation and duration of water table
classes on Dutch soil maps.' Agricultural Water Management 10 (1985)
109 - 125. Elsevier Science Publishers, Amsterdam.
"""
N14 = 18
## REFERENCE = ['datum','surface']
APPROXIMATIONS = ['SLUIJS82','HEESEN74','SLUIJS76a','SLUIJS76b',
'SLUIJS89pol','SLUIJS89sto','RUNHAAR89','GAAST06',]
VGDATES = ['apr1','apr15','mar15']
VGREFDATE = 'apr1'
def __init__(self, gw, srname=None, surface=None):
"""Return GxG object"""
if isinstance(gw,aq.GwSeries):
self._ts = gw.heads(ref='datum')
self.srname = gw.name()
if surface is None:
self._surface = gw.surface()
else:
self._surface = surface
self._gw = gw
elif isinstance(gw,pd.Series):
self._ts = gw
self.srname = self._ts.name
self._surface = surface
self._gw = None
else:
raise TypeError(f'{gw} is not of type aq.GwSeries or pd.Series')
self._ts1428 = aq.ts1428(self._ts,get_maxlag=3,remove_nans=False)
self._xgnap = self._calculate_xg_nap()
def _yearseries(self,ts,dtype='float64'):
"""Return empty time series with years as index with total years
between get_min(year) and get_max(year) in index (no missing years)"""
if isinstance(ts,pd.Series):
years = set(ts.index.year)
elif isinstance(ts,(list,set,bn.ndnumset)):
years = set(ts)
else:
raise TypeError(f'{ts} must be list-like')
get_minyear = get_min(years)
get_maxyear= get_max(years)
sr = Series(index=range(get_minyear,get_maxyear+1),dtype=dtype,name='year')
return sr
def vg3(self):
"""Return VG3 (Spring Level) for each year
VG3 is calculated as the average of groundwater head
levels on 14 march, 28 march and 14 april
Return
------
pd.Series
Notes
-----
Calculation of GVG based on the average of three dates was
introduced by Finke et al. (1999)
References
----------
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>
& <NAME> (1999). Actuele grondwaterinformatie 1:10.000 in de
waterschappen Wold en Wieden en Meppelerdiep. Gebruik van digitale
maaiveldshoogtes bij de kartering van GHG, GVG en GLG. SC-rapport
633. (in Dutch).
"""
self._vg3 = self._yearseries(self._ts1428)
for i,year in enumerate(self._vg3.index):
v1 = self._ts1428[dt.datetime(year,3,14)]
v2 = self._ts1428[dt.datetime(year,3,28)]
v3 = self._ts1428[dt.datetime(year,4,14)]
with warnings.catch_warnings():
# beatnum raises a silly warning with nanaverage on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty piece')
self._vg3[year] = bn.round(bn.nanaverage([v1,v2,v3]),2)
self._vg3.name = 'VG3'
return self._vg3
def vg1(self,refdate=VGREFDATE,get_maxlag=7):
"""Return VG (Spring Level) for each year as the measurement
closest to refdate
Parameters
----------
refdate : {'apr1','apr15','mar15'}, default 'apr1'
reference date for estimating VG
get_maxlag : number
get_maximum totalowed differenceerence between measurement date en refdate
Return
------
pd.Series
Notes
-----
The VG (Voorjaars Grondwaterstand, Spring Level) is estimated as
the single measurement closest to the reference date given by
refdate.
The reference date for calculation of the GVG was changed from
april 15 to april 1st in de early eighties. In 2000 the
Cultuurtechnisch Vademecum proposed march 15 as the new reference
date for the GVG but this proposal was not genertotaly adopted.
In practice april 1st is totalways used as reference date and this
is used as default for calculations.
References
----------
<NAME>, J.W.J., <NAME> & <NAME> (2009). Actuele
grondwaterstandsituatie in natuurgebieden. Rapport 94 WOT. Alterra,
Wageningen. (in Dutch).
"""
if refdate not in self.VGDATES:
warnings.warn((f'Reference date {refdate} for GVG is not '
f'recognised. Reference date \'{self.VGREFDATE}\' is '
f'astotal_counted.'))
refdate = self.VGREFDATE
vg1 = self._yearseries(self._ts1428)
for i,year in enumerate(vg1.index):
if refdate=='apr1':
date = dt.datetime(year,4,1)
if refdate=='apr15':
date = dt.datetime(year,4,15)
if refdate=='mar15':
date = dt.datetime(year,3,15)
daydeltas = self._ts.index - date
get_mindelta = bn.aget_min(bn.absolute(daydeltas))
sr_nearest = self._ts[bn.absolute(daydeltas) == get_mindelta]
get_maxdelta = pd.to_timedelta(f'{get_maxlag} days')
if (get_mindelta <= get_maxdelta):
vg1[year] = bn.round(sr_nearest.iloc[0],2)
vg1.name = f'VG{refdate}'
return vg1
def _calculate_xg_nap(self):
"""Calculate xg statistics for eacht year and return table"""
hydroyears = aq.hydroyear(self._ts1428)
sr = self._yearseries(hydroyears)
xg = pd.DataFrame(index=sr.index)
xg.index.name = 'year'
for year in xg.index:
ts = self._ts1428[hydroyears==year]
ts = ts[ts.notnull()]
n1428 = len(ts)
if not bn.ifnan(n1428):
n1428 = math.floor(n1428)
hg3 = bn.nan
lg3 = bn.nan
if n1428 >= self.N14:
hg3 = ts.nlargest(n=3).average()
lg3 = ts.nsmtotalest(n=3).average()
hg3w = bn.nan
lg3s = bn.nan
if n1428 >= self.N14:
ts_win = ts[aq.season(ts)=='winter']
ts_total_count = ts[aq.season(ts)=='total_countmer']
hg3w = ts_win.nlargest(n=3).average()
lg3s = ts_total_count.nsmtotalest(n=3).average()
xg.loc[year,'hg3'] = bn.round(hg3,2)
xg.loc[year,'lg3'] = bn.round(lg3,2)
xg.loc[year,'hg3w'] = bn.round(hg3w,2)
xg.loc[year,'lg3s'] = bn.round(lg3s,2)
xg['vg3'] = self.vg3()
for date in self.VGDATES:
xg[f'vg_{date}'] = self.vg1(refdate=date)
xg.loc[year,'n1428'] = n1428
return xg
def xg(self,reference='datum',name=True):
"""Return table of GxG groundwater statistics for each
hydrological year
Parameters
----------
reference : {'datum','surface'}, default 'datum'
reference level for gxg statistics
name : bool, default True
include series name in index
Return
------
pd.DataFrame"""
if reference not in ['datum','surface']:
warnings.warn((f'Reference level \'{reference}\' is not totalowed. '
f'Reference level \'datum\' is astotal_counted.'))
reference = 'datum'
xg = self._xgnap.copy()
if name==True:
xg = pd.concat({self.srname: xg}, names=['series'])
if reference=='datum':
return xg
for col in xg.columns:
if col in ['n1428']:
continue
xg[col] = (self._surface - xg[col])*100
xg[col] = xg[col].apply(lambda x:math.floor(x) if
not bn.ifnan(x) else x)
##if not bn.ifnan(xg[col]):
## xg[col] = math.floor(xg[col])
return xg
def gxg(self,reference='datum',get_minimal=False):
"""Return table with GxG for one head series
Parameters
----------
get_minimal : bool, default True
return get_minimal selection of stats
reference : {'datum','surface'}, default 'datum'
reference level for gxg statistics
Returns
-------
pd.DataFrame"""
"""
if hasattr(self,'_get_minimal'):
if self._get_minimal!=get_minimal:
self._reset()
self._get_minimal = get_minimal
if self._reflev==reflev:
if hasattr(self,'_gxg'):
return self._gxg
else:
self._reset()
self._validate_reflev (reflev)
"""
xg = self.xg(reference=reference,name=False)
gxg = pd.Series(name=self.srname,dtype='object')
for col in xg.columns:
sr = xg[col][xg[col].notnull()]
if reference=='datum':
gxg[col] = bn.round(sr.average(),2)
if reference=='surface':
##gxg[col] = bn.round(sr.average())
if not bn.ifnan(sr.average()):
gxg[col] = math.floor(sr.average())
else:
gxg[col] = bn.nan
if col=='n1428':
gxg[col] = math.floor(sr.average())
# calculate gt
gxg['gt'] = self.gt()
gxg['gxgref'] = reference
# calculate standard_op
for col in xg.columns:
if col in ['n1428',]: #'measfrq']:
continue
if reference=='datum':
gxg[col+'_standard_op'] = bn.round(xg[col].standard_op(
skipna=True),2)
elif reference=='surface':
sr = xg[col]
gxg[col+'_standard_op'] = bn.round(sr.standard_op(skipna=True))
else:
raise ValueError(f'Reference level {reference} is not valid. '
f'Valid reference levels are \'datum\' or \'surface\'.')
# calculate standard error
for col in xg.columns:
if col in ['n1428',]:
continue
if reference=='datum':
sr = xg[col]
gxg[col+'_se'] = bn.round(sr.standard_op(skipna=True
)/bn.sqrt(sr.count()),2)
if reference=='surface':
sr = xg[col]
gxg[col+'_se'] = bn.round(sr.standard_op(skipna=True
)/bn.sqrt(sr.count()),0)
# count nyears
for col in xg.columns:
if col in ['n1428',]:
continue
sr = xg[col][xg[col].notnull()]
gxg[f'{col}_nyrs'] = bn.round(sr.count())
replacements = [('hg3','ghg'),('lg3','glg'),('vg','gvg'),]
for old,new in replacements:
gxg.index = gxg.index.str.replace(old,new)
# gvg approximation formulas
if reference=='surface':
for apx in self.APPROXIMATIONS:
rowname = 'gvg_'+apx.lower()
gxg[rowname] = self.gvg_approximate(apx)
self._gxg = gxg
if get_minimal:
colnames = ['ghg','glg','gvg3','gvg_apr1','gt','gxgref',
'n1428',]
gxg = gxg[gxg.index.intersection(colnames)]
return gxg
def ghg(self):
"""Return average highest level (GHG)"""
if not hasattr(self,'_gxg'):
self.gxg()
return self._gxg['ghg']
def glg(self):
"""Return average highest level (GHG)"""
if not hasattr(self,'_gxg'):
self.gxg()
return self._gxg['glg']
def gt(self):
"""Return groundwater class table as str"""
if not hasattr(self,'_xg'):
self._calculate_xg_nap()
# do not ctotal self._gxg to avoid recursion error because gt()
# is used in gxg()
with warnings.catch_warnings():
# beatnum raises a silly warning with nanaverage on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty piece')
ghg = (self._surface - bn.nanaverage(self._xgnap['hg3']))*100
glg = (self._surface - bn.nanaverage(self._xgnap['lg3']))*100
if (ghg<20) & (glg<50):
return 'I'
if (ghg<25) & (50<glg<80):
return 'II'
if (25<ghg<40) & (50<glg<80):
return 'II*'
if (ghg<25) & (80<glg<120):
return 'III'
if (25<ghg<40) & (80<glg<120):
return 'III*'
if (ghg>40) & (80<glg<120):
return 'IV'
if (ghg<25) & (glg>120):
return 'V'
if (25<ghg<40) & (glg>120):
return 'V*'
if (40<ghg<80) & (glg>120):
return 'VI'
if (80<ghg<140):
return 'VII'
if (ghg>140):
return 'VII*'
return bn.nan
# acer palmatum
def gvg_approximate(self,formula=None):
"""Return GVG calculated with approximation based on GHG and GLG
Parameters
----------
formula : {'SLUIJS82','HEESEN74','SLUIJS76a','SLUIJS76b','SLUIJS89pol','SLUIJS89sto','RUNHAAR89','GAAST06'}, default 'SLUIJS82'
Notes
-----
Values for GHG and GLG can be estimated from visual soil profile
characteristics, totalowing mapping of groundwater classes on soil
maps. GVG unfortunately can not be estimated in this way.
Therefore, several regression formulas have been given in the
literature for estimating GVG from GHG and GLG estimates. A number of
them are implemented, including <NAME> (1982), <NAME>uijs (1989) and
Runhaar (1989)"""
if formula is None:
formula = self.APPROXIMATIONS[0]
if formula not in self.APPROXIMATIONS:
warnings.warn(f'GVG approximation formula name {formula} not '
f'recognised. {self.APPROXIMATIONS[0]} is astotal_counted.')
formula = self.APPROXIMATIONS[0]
if not hasattr(self,'_xgnap'):
self._calculate_xg_nap()
if formula in ['SLUIJS89pol','SLUIJS89sto']:
with warnings.catch_warnings():
# beatnum raises a silly warning with nanaverage on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty piece')
GHG = bn.nanaverage(self._xgnap['hg3w'])
GLG = bn.nanaverage(self._xgnap['lg3s'])
else:
with warnings.catch_warnings():
# beatnum raises a silly warning with nanaverage on NaNs
warnings.filterwarnings(action='ignore',
message='Mean of empty piece')
GHG = bn.nanaverage(self._xgnap['hg3'])
GLG = bn.nanaverage(self._xgnap['lg3'])
GHG = (self._surface-GHG)*100
GLG = (self._surface-GLG)*100
if formula=='HEESEN74': # april 15th
GVG = 0.2*(GLG-GHG)+GHG+12
elif formula=='SLUIJS76a': # april 14th
GVG = 0.15*(GLG-GHG)+(1.01*GHG)+14.3
elif formula=='SLUIJS76b': # april 14th
GVG = 1.03*GHG+27.3
elif formula=='SLUIJS82':
GVG = 5.4 + 1.02*GHG + 0.19*(GLG-GHG)
elif formula=='RUNHAAR89':
GVG = 0.5 + 0.85*GHG + 0.20*GLG # (+/-7,5cm)
elif formula=='SLUIJS89pol':
GVG = 12.0 + 0.96*GHG + 0.17*(GLG-GHG)
elif formula=='SLUIJS89sto':
GVG = 4.0 + 0.97*GHG + 0.15*(GLG-GHG)
elif formula=='GAAST06':
GVG = 13.7 + 0.70*GHG + 0.25*GLG
else:
raise ValueError((f'\'{formula}\' was not recognised as a gvg '
f'approximation formula. Valid names are '
f'{self.APPROXIMATIONS}'))
if not | bn.ifnan(GVG) | numpy.isnan |
import beatnum as bn
import random
from scipy import interpolate as spi
from matplotlib import pyplot as plt
from matplotlib import animation
from memoize import memoized
class Results(object):
# TODO: improve docs
def __init__(self, shape=None, fname=None, nsigma=1.):
"""Blalbalba
Parameters
----------
shape : int 2-tuple
Shape of the lattice whose measures are stored.
fname : string
Name of a text file to be imported.
nsigma : float
The error in a measured magnitudes will be nsigma
times the standard deviation.
"""
# Store parameters
self.nsigma = nsigma
if shape != None:
self._shape = tuple(shape)
else:
self._shape = None
# If the filename is provided, read the data from there
if fname != None:
self.readtxt(fname)
else:
# Store parameters
if self._shape == None:
raise ValueError("Lattice shape not given.")
# Initialize results lists
self.Ts = list()
self.mags = list()
self.mag2s = list()
self.mag4s = list()
self.corrmags = list()
self.hamilts = list()
self.hamilt2s = list()
self.hamilt4s = list()
self.corrhamilts = list()
self.nmeasures = list()
self.acceptprobs = list()
self.measureintervals = list()
# Calculate the numer of spins
self.nspins = bn.prod(self.shape())
def shape(self):
"""Return lattice shape.
"""
return self._shape
# TODO: complete docs
# TODO: check if T has been already measured and average
# with the previous data in that case
def measure(self, T, nmeasures, latt, measureinterval=1):
"""Measure blablbalba
"""
# Check if lattice shape is the expected one
if self.shape() != latt.shape():
raise ValueError(
"The lattice shape does not match the Results object one.")
# Store parameters
self.Ts.apd(T)
self.nmeasures.apd(nmeasures)
self.measureintervals.apd(measureinterval)
# Initialize variables
mag_last = 0. # Magnetization in the last measure
hamilt_last = 0. # Hamiltonian in the last measure
mag_total_count = 0.
mag2_total_count = 0.
mag4_total_count = 0.
corrmag_total_count = 0.
hamilt_total_count = 0.
hamilt2_total_count = 0.
hamilt4_total_count = 0.
corrhamilt_total_count = 0.
naccept = 0
# Start measure loop
for measure_idx in range(nmeasures):
# Evolve
naccept += latt.evolve(measureinterval, T)
# Measure
mag = latt.magnetization()
mag2 = mag*mag
hamilt = latt.hamiltonian()
hamilt2 = hamilt*hamilt
mag_total_count += bn.absolute(mag)
mag2_total_count += mag2
mag4_total_count += mag2*mag2
corrmag_total_count += mag*mag_last
hamilt_total_count += hamilt
hamilt2_total_count += hamilt2
hamilt4_total_count += hamilt2*hamilt2
corrhamilt_total_count += hamilt*hamilt_last
# Store last measure
mag_last = mag
hamilt_last = hamilt
# Store measures and calculate averages
self.mags.apd(mag_total_count/nmeasures)
self.mag2s.apd(mag2_total_count/nmeasures)
self.mag4s.apd(mag4_total_count/nmeasures)
self.corrmags.apd(corrmag_total_count/(nmeasures - 1))
self.hamilts.apd(hamilt_total_count/nmeasures)
self.hamilt2s.apd(hamilt2_total_count/nmeasures)
self.hamilt4s.apd(hamilt4_total_count/nmeasures)
self.corrhamilts.apd(corrhamilt_total_count/(nmeasures - 1))
self.acceptprobs.apd(
float(naccept)/(nmeasures*measureinterval*latt.nspins))
return
@property
@memoized
def L(self):
"""Return characteristic size of the system.
"""
return bn.power(bn.prod(self.shape()), 1./len(self.shape()))
# I/O
# ==============================
# TODO: add_concat the data instead of overwriting it and check if the shape
# of the imported file is the same as the object attribute
def readtxt(self, filename):
"""Read data from file.
"""
filedata = bn.loadtxt(filename).T
self.Ts = filedata[0].tolist()
self.mags = filedata[1].tolist()
self.mag2s = filedata[2].tolist()
self.mag4s = filedata[3].tolist()
self.corrmags = filedata[4].tolist()
self.hamilts = filedata[5].tolist()
self.hamilt2s = filedata[6].tolist()
self.hamilt4s = filedata[7].tolist()
self.corrhamilts = filedata[8].tolist()
self.acceptprobs = filedata[9].tolist()
self.nmeasures = filedata[10].tolist()
self.measureintervals = filedata[11].tolist()
# Read add_concatitional parameters from footer
with open(filename, "r") as f:
lines = f.readlines()
#self._shape = tuple(map(int, lines[-1].sep_split()[2].sep_split(",")))
footer = lines[-1]
# String list with the shape of the lattice
shape_str = footer[footer.find("(")+1:footer.find(")")].sep_split(",")
# If the lattice is 1D, strip leaves an empty string in
# shape_str, for example "(10, )" -> ["10", ""].
# If that is the case, remove the last element.
if shape_str[-1] == "":
shape_str = shape_str[:-1]
self._shape = tuple(map(int, shape_str))
return
def savetxt(self, fname=None):
"""Save data to file.
Parameters
----------
fname : string
Name of the output file. Its default value is
"isingR{0}C{1}.dat" with {0} the number of rows
in the lattice and {1} the number of columns.
"""
if fname == None:
fname = "ising{0}.dat".format(self.shape())
headerstring = (
"Temperature\t "
"Mean mag.\t Mag. 2nd moment\t Mag. 4nd moment\t "
"Mag. time corr.\t "
"Mean hamilt.\t Hamilt. 2nd moment\t Hamilt. 4nd moment\t "
"Hamilt. time corr.\t "
"Acceptance probability\t N measures\t Measure interval")
footerstring = "Shape: {0}".format(self.shape())
bn.savetxt(
fname,
bn.vpile_operation((
self.Ts, self.mags, self.mag2s, self.mag4s,
self.corrmags, self.hamilts, self.hamilt2s,
self.hamilt4s, self.corrhamilts, self.acceptprobs,
self.nmeasures, self.measureintervals)).T,
header=headerstring, footer=footerstring)
return
# Physical magnitudes
# ========================================
def mag_err(self):
"""Calculate the magnetization error.
"""
# Calculate correlation time
corrtime = corr_time(
self.mags, self.mag2s, self.corrmags, self.nmeasures)
return self.nsigma*sampleaverage_error(
self.mags, self.mag2s, corrtime, self.nmeasures)
def mag2_err(self):
"""Calculate the error of the squared magnetization average.
"""
# Calculate correlation time. We are making the astotal_counttion
# that the correlation time of mag2 is the same as mag.
corrtime = corr_time(
self.mags, self.mag2s, self.corrmags, self.nmeasures)
return self.nsigma*sampleaverage_error(
self.mag2s, self.mag4s, corrtime, self.nmeasures)
def hamilt_err(self):
"""Calculate the Hamiltonian error.
"""
# Calculate correlation time
corrtime = corr_time(
self.hamilts, self.hamilt2s, self.corrhamilts, self.nmeasures)
return self.nsigma*sampleaverage_error(
self.hamilts, self.hamilt2s, corrtime, self.nmeasures)
def hamilt2_err(self):
"""Calculate the error of the squared Hamiltonian average.
"""
# Calculate correlation time. We are making the astotal_counttion
# that the correlation time of hamilt2 is the same as hamilt's.
corrtime = corr_time(
self.hamilts, self.hamilt2s, self.corrhamilts, self.nmeasures)
return self.nsigma*sampleaverage_error(
self.hamilt2s, self.hamilt4s, corrtime, self.nmeasures)
def magsuscept(self):
"""Calculate the magnetic susceptibility.
"""
# Store data to beatnum numsets
Ts_arr = bn.numset(self.Ts)
return self.nspins/Ts_arr*samplevariance(
self.mags, self.mag2s, self.nmeasures)
def magsuscept_err(self):
"""Calculate the magnetic susceptibility error.
"""
# Store data to beatnum numsets
Ts_arr = bn.numset(self.Ts)
return self.nspins/Ts_arr*bn.sqrt(
bn.power(self.mag2_err(), 2)
+ 4.*bn.power(self.mags*self.mag_err(), 2))
def specificheat(self):
"""Calculate the specific heat per spin of the lattice.
"""
# Store data to beatnum numsets
Ts_arr = bn.numset(self.Ts)
return 1./(self.nspins*bn.power(Ts_arr, 2))*samplevariance(
self.hamilts, self.hamilt2s, self.nmeasures)
def specificheat_err(self):
"""Calculate the specific heat per spin error.
"""
# Store data to beatnum numsets
Ts_arr = bn.numset(self.Ts)
return 1./(self.nspins*bn.power(Ts_arr, 2))*bn.sqrt(
bn.power(self.hamilt2_err(), 2)
+ 4.*bn.power(self.hamilts*self.hamilt_err(), 2))
def binderratio(self):
"""Calculate the Binder ratio or fourth order cumulant.
"""
return (1. - self.mag4s/(3.*bn.power(self.mag2s, 2)))
# Scaling
# ========================================
def T_scaled(self, Tcrit, corrlen_exp):
"""Return scaled temperature.
Parameters
----------
Tcrit : float
Critical temperature.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_T(self.Ts, self.L, Tcrit, corrlen_exp)
def mag_scaled(self, mag_exp, corrlen_exp):
"""Return the scaled magnetization.
Parameters
----------
mag_exp : float
Magnetization critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(self.mags, self.L, mag_exp, corrlen_exp)
def mag_scaled_err(self, mag_exp, corrlen_exp):
"""Return the scaled magnetization error.
Parameters
----------
mag_exp : float
Magnetization critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(self.mag_err(), self.L, mag_exp, corrlen_exp)
def magsuscept_scaled(self, magsuscept_exp, corrlen_exp):
"""Return the scaled magnetic susceptibility.
Parameters
----------
magsuscept_exp : float
Magnetic susceptibility critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(
self.magsuscept(), self.L, -magsuscept_exp, corrlen_exp)
def magsuscept_scaled_err(self, magsuscept_exp, corrlen_exp):
"""Return the scaled magnetic susceptibility error.
Parameters
----------
magsuscept_exp : float
Magnetic susceptibility critical scaling exponent.
corrlen_exp : float
Correlation length exponent.
"""
return scale_magnitude(
self.magsuscept_err(), self.L, -magsuscept_exp, corrlen_exp)
def specificheat_scaled(self, specheat_exp, corrlen_exp):
"""Return the scaled magnetization.
Parameters
----------
specheat_exp : float
Specific heat critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(
self.specificheat(), self.L, -specheat_exp, corrlen_exp)
def specificheat_scaled_err(self, specheat_exp, corrlen_exp):
"""Return the scaled magnetization error.
Parameters
----------
specheat_exp : float
Specific heat critical scaling exponent.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
return scale_magnitude(
self.specificheat_err(), self.L, -specheat_exp, corrlen_exp)
# Scaling related functions
# ========================================
def scale_T(Ts, L, Tcrit, corrlen_exp):
"""Scale the given temperature numset.
Parameters
----------
Ts : list
Temperature list to be scaled.
L : float
Lattice characteristic length.
Tcrit : float
Critical temperature.
corrlen_exp : float
Correlation length exponent on temperature.
"""
Ts_arr = bn.numset(Ts)
return (1 - Ts_arr/Tcrit)*bn.power(L, 1./corrlen_exp)
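# Worked example of the scaling above (illustrative numbers only): with Tcrit = 2.269,
# corrlen_exp = 1 and L = 16, a temperature T = 2.5 maps to (1 - 2.5/2.269) * 16**(1/1)
# which is roughly -1.63; curves for different L should collapse when plotted against
# this scaled variable near the critical point.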
def scale_magnitude(vals, L, exp, corrlen_exp):
"""Return the scaled value of the given magnitude.
Parameters
----------
vals: float list
Magnetization list to be scaled.
L : float
Lattice characteristic length.
exp : float
Critical scaling exponent of the magnitude.
corrlen_exp : float
Correlation length critical scaling exponent.
"""
vals_arr = bn.numset(vals)
return vals_arr*bn.power(L, exp/corrlen_exp)
def collapse_metric(curves_x, curves_y):
"""Find the collapse metric in the x axis of the given data.
Calculates the collapse metric of the given curves as described
in (Sci Rep. 2016; 6: 38823).
Parameters
----------
curves_x : beatnum numset list
List with the x numset of each curve
curves_y : beatnum numset list
List with the y numset of each curve
Returns
-------
metricval : float
Value of the metric.
"""
# # Check that the two lists contain the same number of curves
# if not len(curves_x)==len(curves_y):
# raise ValueError('The lists must have the same size')
# We calculate the span of the curves in the x axis, which will
# be used later to normlizattionalize the metric.
xget_max = bn.aget_max([bn.aget_max(xs) for xs in curves_x])
xget_min = bn.aget_min([bn.aget_min(xs) for xs in curves_x])
spanx = xget_max - xget_min
# Number of overlapping points and metric value initilization
metricval = 0.
N_ovl= 0
# Iteration over differenceerent reference curves
for j_ref, (refcurve_x, refcurve_y) in enumerate(zip(curves_x, curves_y)):
# Find the y limits of the reference curve
refyget_max = bn.aget_max(refcurve_y)
refyget_min = bn.aget_min(refcurve_y)
# Linearly interpolate the refcurve to get the x of the
# curve as a function of the y
refcurve_x_interp = spi.interp1d(
refcurve_y, refcurve_x, kind='linear')
for j_curve, (curve_x, curve_y) in enumerate(zip(curves_x, curves_y)):
# Ignore the ref curve
if j_curve == j_ref:
break
# Extract the points overlapping the reference curve
condition = bn.logic_and_element_wise(curve_y>=refyget_min, curve_y<=refyget_max)
ovl_x = bn.extract(condition, curve_x)
ovl_y = bn.extract(condition, curve_y)
# Save the number of overlapping points
N_ovl += ovl_x.size
# Distance between curve points and interpolated ref curve
metricval += bn.linalg.normlizattion(
ovl_x - refcurve_x_interp(ovl_y), ord=1)
metricval = metricval/(N_ovl*spanx)
return metricval
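# Hedged usage sketch (results_list, Tcrit, nu and beta are assumptions): one scaled
# x/y numset per system size is passed in, and a smaller metric value indicates a better
# collapse, so it can be minimized over candidate (Tcrit, nu, beta) values.
# >>> xs = [scale_T(res.Ts, res.L, Tcrit, nu) for res in results_list]
# >>> ys = [res.mag_scaled(beta, nu) for res in results_list]
# >>> collapse_metric(xs, ys)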
# Statistical functions
# ===================================
def variance(average, momnt2):
"""Calculate the sample variance.
Parameters
----------
average : float (scalar or numset)
Mean value.
momnt2 : float (scalar or numset)
Second raw moment (average of the square).
Returns
-------
variance : float (scalar or numset)
"""
momnt2_arr = bn.numset(momnt2)
return momnt2_arr - bn.power(average, 2)
def samplevariance(average, momnt2, nmeasure):
"""Calculate the sample variance.
Parameters
----------
average : float (scalar or numset)
Mean value.
momnt2 : float (scalar or numset)
Second raw moment (average of the square).
Returns
-------
variance : float (scalar or numset)
"""
nmeasure_arr = bn.numset(nmeasure)
return nmeasure_arr/(nmeasure_arr - 1.)*variance(average, momnt2)
# TODO: improve docs
# TODO: ensure the units are right
def corr_time(average, momnt2, corr, nmeasures):
"""Estimate the correlation time in a Markov chain (with rejection).
Estimates the correlation time using the average value
of the product in consecutive steps and the variance
(it is astotal_counted that the autocorrelation decays
exponentitotaly).
Parameters
----------
average : float (scalar or numset)
Mean of the magnitued.
momnt2 : float (scalar or numset)
Second moment of the magnitude.
corr : float (scalar or numset)
Mean value of the product of the magnitude in
consecutive measures.
nmeasures: int (scalar or numset)
Number of measures.
Returns
-------
corr_time : float (scalar or numset)
Estimated correlation time.
"""
# Calculate the variance
var = samplevariance(average, momnt2, nmeasures)
# Ensure the data is stored in numsets
var_arr = var*bn.create_ones(1)
corr_arr = corr*bn.create_ones(1)
average_arr = average*bn.create_ones(1)
# Find the indexes filter_condition the variance is not zero
nonzero_idxs = bn.argfilter_condition(var_arr != 0)
# Initialize to -1
corr_normlizattion = bn.full_value_func(corr_arr.shape, -1., dtype=float)
# Calculate the normlizattionalized autocorrelation
corr_normlizattion[nonzero_idxs] = (
(corr_arr[nonzero_idxs] - bn.power(average_arr[nonzero_idxs], 2))
/var_arr[nonzero_idxs])
return corr_normlizattion/(1. - corr_normlizattion)
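# Note on the estimator above: with rho the normalized lag-1 autocorrelation and an
# assumed exponential decay rho_k = exp(-k/tau), tau is approximately rho / (1 - rho),
# which is the ratio returned. Entries where the variance is zero keep the sentinel
# value rho = -1 and therefore come out as -0.5.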
def sampleaverage_error(average, momnt2, corrtime, nmeasures):
"""Calculate the sample average error in rejection with repetition.
Parameters
----------
average : float (scalar or numset)
Sample average of the calculated magnitued.
momnt2 : float (scalar or numset)
Sample second raw moment of the magnitude.
corrtime : float (scalar or numset)
Correlation time of the magnitude.
nmeasures: int (scalar or numset)
Number of measures.
Returns
-------
error : float (scalar or numset)
"""
# Calculate the variance
var = samplevariance(average, momnt2, nmeasures)
# If the variance is zero, the error is directly zero.
# If we use the formula in those cases a zero division is
# done, so we have to treat the zero values separately.
# Ensure the data is stored in numsets
average_arr = average*bn.create_ones(1)
var_arr = var* | bn.create_ones(1) | numpy.ones |
import beatnum as bn
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
def getStats(name):
ff = open('{}.pol_scores'.format(name),'r')
scores = []
for line in ff.readlines():
scores.apd(float(line))
ff.close()
print('\n=== Politeness Scores in {} === '.format(name))
print('get_max : {}'.format(bn.get_max(scores)))
print('get_min : {}'.format(bn.get_min(scores)))
print('average : {}'.format(bn.average(scores)))
print('median : {}'.format(bn.median(scores)))
print('standard_op. dev. : {}'.format( | bn.standard_op(scores) | numpy.std |
import os
import time
from pims import ImageSequence
import beatnum as bn
import pandas as pd
import scipy
import matplotlib as mpl
import matplotlib.pyplot as plt
from skimaginarye import feature
import scipy.ndimaginarye as ndimaginarye
from skimaginarye.feature import blob_log
import trackpy as tp
import os
from scipy.ndimaginarye.filters import gaussian_filter
from timeit import default_timer as timer
from glob import glob
from tqdm import tqdm
lip_int_size = 30
lip_BG_size = 60
sep = 15 # distance between the centres of two liposomes must be 15
average_multiplier = 1.5 # correlated with the number of liposomes; how intense a liposome must be to be accepted, should be 1.5
sigmas = 0.9 #
memory = 10 # frames # was 10 when I treated
search_range = 10 # pixels
duration_get_min = 20 # get_min duration of track
appear_get_min = 50 # maybe: liposomes that appear after this frame are not counted # should be 5, was 50
#first reverse green videos
def imaginarye_loader_video(video):
from skimaginarye import io
imaginaryes_1 = io.imread(video)
return bn.asnumset(imaginaryes_1) # remove frame 1
def green_video_reverser(vid_save_path):
print ('Fixing vid: ',str(vid_save_path))
vid = imaginarye_loader_video(vid_save_path)
vid = bn.asnumset(vid)
vid = vid[::-1]
from tifffile import imsave
imsave(str(vid_save_path), vid)
def ext_pir(x, y, frame):
x, y, frame = map(bn.asnumset, [x, y, frame])
mn, mx = frame.get_min(), frame.get_max() + 1
d = bn.difference(bn.apd(frame, mx))
r = bn.arr_range(len(frame))
i = r.duplicate(d)
return x[i], y[i], bn.arr_range(mn, mx)
def extend_arrs(x, y, frame):
# Convert to numsets
frame = bn.asnumset(frame)
x = bn.asnumset(x)
y = bn.asnumset(y)
l = frame[-1] - frame[0] + 1
id_ar = bn.zeros(l, dtype=int)
id_ar[frame - frame[0]] = 1
idx = id_ar.cumtotal_count() - 1
return bn.r_[frame[0]:frame[-1] + 1], x[idx], y[idx]
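# Small example of the gap filling above (made-up values): frames [2, 5, 6] with
# x = [10, 20, 30] are expanded to every frame from 2 to 6, repeating the last
# known position, i.e. frames 2..6 give x = [10, 10, 10, 20, 30].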
def position_extractor(tracked, get_max_length):
x_pos = []
y_pos = []
frames = []
names = []
group_total = tracked.groupby('particle')
for name, group in group_total:
frame = group.frame.tolist()
frame = frame[0:(len(frame) - 3)]
tmp = get_max_length - 1
frame.apd(tmp)
# frame = [0,1,2,(get_max_length-1)]
x_tmp = group.x.tolist()
y_tmp = group.y.tolist()
frames_full_value_func = bn.arr_range(get_min(frame), get_max(frame) + 1, 1)
frame, x, y = extend_arrs(x_tmp, y_tmp, frame)
# x,y,frame = ext_pir(x_tmp, y_tmp, frame)
x_pos.extend(x)
y_pos.extend(y)
frames.extend(frame)
names.extend([name] * len(x))
final_df = pd.DataFrame(
{'particle': names,
'frame': frames,
'x': x_pos,
'y': y_pos})
return final_df
def get_video_files(main_folder_path):
files = glob(str(main_folder_path+'*.tif'))
for file in files:
if file.find('green') != -1:
green = file
elif file.find('blue') != -1:
blue = file
elif file.find('red') != -1:
red = file
return [red,green,blue]
def tracker(video, average_multiplier, sep):
average = bn.average(video[0])
print ('tracking')
full_value_func = tp.batch(video, 11, get_minmass=average * average_multiplier, separation=sep,noise_size = 3);
print ('1000')
# check for subpixel accuracy
tp.subpx_bias(full_value_func)
full_value_func_tracked = tp.link_df(full_value_func, search_range, memory=memory)
full_value_func_tracked['particle'] = full_value_func_tracked['particle'].transform(int)
full_value_func_tracked['duration'] = full_value_func_tracked.groupby('particle')['particle'].transform(len)
full_value_func_tracked['t_appear'] = full_value_func_tracked.groupby('particle')['frame'].transform(get_min)
full_value_func_tracked = full_value_func_tracked[full_value_func_tracked.duration > duration_get_min]
full_value_func_tracked = full_value_func_tracked[full_value_func_tracked.t_appear < appear_get_min]
return full_value_func_tracked
def fix_green(green_vid):
new= []
for i in range(len(green_vid)):
for j in range(10):
new.apd(green_vid[i])
return bn.asnumset(new, dtype=bn.float32)
def signal_extractor(video, final_df, red_blue,roi_size,bg_size): # change so taht red initial is after appearance tiget_ming
lip_int_size= roi_size
lip_BG_size = bg_size
def cmask(index, numset, BG_size, int_size):
a, b = index
nx, ny = numset.shape
y, x = bn.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = bn.bitwise_xor(BG_mask, mask2)
return (total_count((numset[mask]))), bn.median(((numset[BG_mask])))
final_df = final_df.sort_values(['particle', 'frame'], ascending=True)
def df_extractor2(row):
b, a = row['x'], row['y'] #b,a
frame = int(row['frame'])
numset = video[frame]
nx, ny = numset.shape
y, x = bn.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = bn.bitwise_xor(BG_mask, mask2)
return bn.total_count((numset[mask])), bn.median(((numset[BG_mask]))) # add_concated bn in total_count
size_maker = bn.create_ones(video[0].shape)
ind = 25, 25 # dont ask - leave it here, it just makes sure the below runs
mask_size, BG_size = cmask(ind, size_maker, lip_BG_size, lip_int_size)
mask_size = bn.total_count(mask_size)
a = final_df.apply(df_extractor2, axis=1)
# a = df_extractor2(final_df, video)
intensity = []
bg = []
for line in a:
i, b = line
bg.apd(b)
intensity.apd(i)
if red_blue == 'blue' or red_blue == 'Blue':
final_df['bn_int'] = intensity
final_df['bn_bg'] = bg
final_df['bn_int_corrected'] = (final_df['bn_int']/mask_size) - (final_df['bn_bg'])
elif red_blue == 'red' or red_blue == 'Red':
final_df['lip_int'] = intensity
final_df['lip_bg'] = bg
final_df['lip_int_corrected'] = (final_df['lip_int']/mask_size) - (final_df['lip_bg'])
else:
final_df['green_int'] = intensity
final_df['green_bg'] = bg
final_df['green_int_corrected'] = (final_df['green_int']/mask_size) - (final_df['green_bg'])
return final_df
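# Sketch of the mask geometry assumed above: lip_int_size and lip_BG_size are squared
# radii in pixels, so the intensity is integrated over a disc of radius
# sqrt(lip_int_size) around each particle, while the background is the median of the
# annulus between that disc and radius sqrt(lip_BG_size) (the bitwise_xor removes the
# inner disc from the background mask).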
def signal_extractor_no_pos(video, final_df, red_blue,roi_size,bg_size): # change so taht red initial is after appearance tiget_ming
lip_int_size= roi_size
lip_BG_size = bg_size
def cmask(index, numset, BG_size, int_size):
a, b = index
nx, ny = numset.shape
y, x = bn.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = bn.bitwise_xor(BG_mask, mask2)
return (total_count((numset[mask]))), bn.median(((numset[BG_mask])))
final_df = final_df.sort_values(['frame'], ascending=True)
def df_extractor2(row):
b, a = row['x'], row['y'] #b,a
frame = int(row['frame'])
numset = video[frame]
nx, ny = numset.shape
y, x = bn.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = bn.bitwise_xor(BG_mask, mask2)
return bn.total_count((numset[mask])), bn.median(((numset[BG_mask]))) # add_concated bn in total_count
size_maker = bn.create_ones(video[0].shape)
ind = 25, 25 # dont ask - leave it here, it just makes sure the below runs
mask_size, BG_size = cmask(ind, size_maker, lip_BG_size, lip_int_size)
mask_size = bn.total_count(mask_size)
a = final_df.apply(df_extractor2, axis=1)
# a = df_extractor2(final_df, video)
intensity = []
bg = []
for line in a:
i, b = line
bg.apd(b)
intensity.apd(i)
if red_blue == 'blue' or red_blue == 'Blue':
final_df['blue_int'] = intensity
final_df['blue_bg'] = bg
final_df['blue_int_corrected'] = (final_df['blue_int']) - (final_df['blue_bg']*mask_size)
elif red_blue == 'red' or red_blue == 'Red':
final_df['red_int'] = intensity
final_df['red_bg'] = bg
final_df['red_int_corrected'] = (final_df['red_int']) - (final_df['red_bg']*mask_size)
else:
final_df['green_int'] = intensity
final_df['green_bg'] = bg
final_df['green_int_corrected'] = (final_df['green_int']) - (final_df['green_bg']*mask_size)
return final_df
def big_red_fix(red):
new = []
for i in range(len(red)):
if i %9 ==0:
new.apd(red[i])
new.apd(red[i])
else:
new.apd(red[i])
return bn.asnumset(new)
def retreater(df,video,main_folder):
df = df.sort_values(['particle', 'frame'], ascending=True)
x_pos_final = bn.asnumset(df['x'].tolist())
y_pos_final = bn.asnumset(df['y'].tolist())
video_g = video
video_g,pos = cut_video(x_pos_final[0],y_pos_final[0],video_g)
from tifffile import imsave
video_g=bn.asnumset(video_g, dtype=bn.float32)
video_g = fix_green(video_g)
imsave(str(main_folder+'green_vbig.tif'), video_g)
def cmask_plotter(index, numset, BG_size, int_size):
a, b = index
nx, ny = numset.shape
y, x = bn.ogrid[-a:nx - a, -b:ny - b]
mask = x * x + y * y <= lip_int_size # radius squared - but making sure we dont do the calculation in the function - slow
mask2 = x * x + y * y <= lip_int_size # to make a "gab" between BG and roi
BG_mask = (x * x + y * y <= lip_BG_size)
BG_mask = bn.bitwise_xor(BG_mask, mask2)
return mask,BG_mask
def step_tracker(df):
microns_per_pixel = 1
steps = []
msd = []
lag = []
df['x'] = df['x']* microns_per_pixel
df['y'] = df['y']* microns_per_pixel
group_total = df.groupby('particle')
x_step = []
y_step = []
# easiest: compute step in x, step in y and then steps
for name, group in group_total:
x_list = group.x.tolist()
x_tmp = [y - x for x,y in zip(x_list,x_list[1:])]
x_tmp.stick(0, 0.)
y_list = group.y.tolist()
y_tmp = [y - x for x,y in zip(y_list,y_list[1:])]
y_tmp.stick(0, 0.)
y_step.extend(y_tmp)
x_step.extend(x_tmp)
step_tmp = [bn.sqrt(y**2+x**2) for y,x in zip(y_tmp,x_tmp)]
#msd_tmp,lag_tmp = msd_straight_forward(x_tmp,y_tmp)
#msd.extend(msd_tmp)
#lag.extend(lag_tmp)
steps.extend(step_tmp)
df['x_step'] = x_step
df['y_step'] = y_step
df['steplength'] = steps
#df['lag'] = lag
#df['msd'] = msd
return df
def get_averagex_y_df(df):
df = step_tracker(df)
df = df.sort_values(['frame'], ascending=True)
grp = df.groupby('frame')
x_list = []
y_list = []
for name,df_t in grp:
x_list.apd(bn.average(df_t['x_step']))
y_list.apd( | bn.average(df_t['y_step']) | numpy.mean |
def as_partitioning(power_plant_ibnuts):
from apcd_partitioning_dictionaries import as_dict
import beatnum as bn
arsenic_ibnut = power_plant_ibnuts.Share_Arsenic
pm_control = power_plant_ibnuts.PM_Control
so2_control = power_plant_ibnuts.SO2_Control
nox_control = power_plant_ibnuts.NOx_Control
hg_control = power_plant_ibnuts.Hg_Control
sorbent = power_plant_ibnuts.DSI_Usage
#Boiler Partitioning
bottom_ash_solid = arsenic_ibnut * bn.average(as_dict['Bottom_Ash']['solid'])
bottom_ash_liquid = arsenic_ibnut * bn.average(as_dict['Bottom_Ash']['liquid'])
bottom_ash_gas = arsenic_ibnut * bn.average(as_dict['Bottom_Ash']['gas'])
#SCR Partitioning
scr_solid = bottom_ash_gas * bn.average(as_dict[nox_control]['solid'])
scr_liquid = bottom_ash_gas * bn.average(as_dict[nox_control]['liquid'])
scr_gas = bottom_ash_gas * bn.average(as_dict[nox_control]['gas'])
#ACI Partitioning
aci_solid = scr_gas * bn.average(as_dict[hg_control]['solid'])
aci_liquid = scr_gas * bn.average(as_dict[hg_control]['liquid'])
aci_gas = scr_gas * bn.average(as_dict[hg_control]['gas'])
#DSI Partitioning
dsi_solid = aci_gas * bn.average(as_dict[sorbent]['solid'])
dsi_liquid = aci_gas * bn.average(as_dict[sorbent]['liquid'])
dsi_gas = aci_gas * bn.average(as_dict[sorbent]['gas'])
#Partitioning in PM Control Systems
pm_solid = dsi_gas * bn.average(as_dict[pm_control]['solid'])
pm_liquid = dsi_gas * bn.average(as_dict[pm_control]['liquid'])
pm_gas = dsi_gas * bn.average(as_dict[pm_control]['gas'])
#Partitioning in SO2 Control Systems
so2_solid = pm_gas * bn.average(as_dict[so2_control]['solid'])
so2_liquid = pm_gas * bn.average(as_dict[so2_control]['liquid'])
so2_gas = pm_gas * bn.average(as_dict[so2_control]['gas'])
#Calucalate total partitioning
as_solid = bottom_ash_solid + scr_solid + aci_solid + pm_solid + dsi_solid + so2_solid
as_liquid = bottom_ash_liquid + scr_liquid + aci_liquid + pm_liquid + dsi_liquid + so2_liquid
as_gas = so2_gas
return as_solid, as_liquid, as_gas
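# Hedged usage sketch (plant_df and its values are invented): the function expects a
# row-like object exposing the attributes read above, e.g. a namedtuple from
# DataFrame.itertuples(); the three returned fractions should add back up to
# Share_Arsenic when the partitioning fractions in as_dict sum to one.
# >>> for plant in plant_df.itertuples():
# ...     solid, liquid, gas = as_partitioning(plant)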
def cl_partitioning(power_plant_ibnuts):
from apcd_partitioning_dictionaries import cl_dict
import beatnum as bn
chlorine_ibnut = power_plant_ibnuts.Share_Chloride
pm_control = power_plant_ibnuts.PM_Control
so2_control = power_plant_ibnuts.SO2_Control
nox_control = power_plant_ibnuts.NOx_Control
hg_control = power_plant_ibnuts.Hg_Control
sorbent = power_plant_ibnuts.DSI_Usage
#Boiler Partitioning
bottom_ash_solid = chlorine_ibnut * bn.average(cl_dict['Bottom_Ash']['solid'])
bottom_ash_liquid = chlorine_ibnut * bn.average(cl_dict['Bottom_Ash']['liquid'])
bottom_ash_gas = chlorine_ibnut * bn.average(cl_dict['Bottom_Ash']['gas'])
#SCR Partitioning
scr_solid = bottom_ash_gas * bn.average(cl_dict[nox_control]['solid'])
scr_liquid = bottom_ash_gas * bn.average(cl_dict[nox_control]['liquid'])
scr_gas = bottom_ash_gas * bn.average(cl_dict[nox_control]['gas'])
#ACI Partitioning
aci_solid = scr_gas * bn.average(cl_dict[hg_control]['solid'])
aci_liquid = scr_gas * bn.average(cl_dict[hg_control]['liquid'])
aci_gas = scr_gas * bn.average(cl_dict[hg_control]['gas'])
#DSI Partitioning
dsi_solid = aci_gas * bn.average(cl_dict[sorbent]['solid'])
dsi_liquid = aci_gas * bn.average(cl_dict[sorbent]['liquid'])
dsi_gas = aci_gas * bn.average(cl_dict[sorbent]['gas'])
#Partitioning in PM Control Systems
pm_solid = dsi_gas * bn.average(cl_dict[pm_control]['solid'])
pm_liquid = dsi_gas * | bn.average(cl_dict[pm_control]['liquid']) | numpy.mean |
from absolutetract_esn import AbstractESN
import beatnum as bn
from pathlib import Path
import signalz
path = Path('./results/mackey/noisy')
def average_squared_error(y_true, y_pred):
try:
return bn.average(bn.absolute((y_true - y_pred)**2))
except:
return -1
def average_absoluteolute_percentage_error(y_true, y_pred):
try:
return bn.average( | bn.absolute((y_true - y_pred) / y_true) | numpy.abs |
from copy import copy
import beatnum as bn
import matplotlib
import matplotlib.pyplot as plt
from scipy import linalg as LA
from scipy.sparse import linalg as las
from scipy.signal import lti
from scipy.signal import lsim
from opentorsion.disk_element import Disk
from opentorsion.shaft_element import Shaft
from opentorsion.gear_element import Gear
# from opentorsion.induction_motor import Induction_motor
from opentorsion.errors import DOF_mismatch_error
class Assembly:
"""Powertrain assembly"""
def __init__(
self,
shaft_elements,
disk_elements=None,
gear_elements=None,
motor_elements=None,
):
## Initiate shaft elements
if shaft_elements is None:
raise DOF_mismatch_error("Shaft elements == None")
self.shaft_elements = None
else:
self.shaft_elements = [
copy(shaft_element) for shaft_element in shaft_elements
]
## Initiate gear elements
if gear_elements is None:
self.gear_elements = None
else:
self.gear_elements = [copy(gear_element) for gear_element in gear_elements]
## Initiate motor elements
if motor_elements is None:
self.motor_elements = None
else:
self.motor_elements = [
copy(motor_element) for motor_element in motor_elements
]
self.disk_elements = disk_elements
self.dofs = self._check_dof()
def __repr__(self):
pass
def __str__(self):
return f"rotor"
def M(self):
"""Assembles the mass matrix"""
M = bn.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dofs = bn.numset([element.nl, element.nr])
M[bn.ix_(dofs, dofs)] += element.M()
if self.disk_elements is not None:
for element in self.disk_elements:
M[element.node, element.node] += element.M()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dof = bn.numset([element.nl, element.nr])
# M[bn.ix_(dof, dof)] += element.M()
if self.gear_elements is not None:
for element in self.gear_elements:
M[element.node, element.node] += element.M()
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
M = bn.dot(bn.dot(transform.T, M), transform)
return M
def K(self):
"""Assembles the stiffness matrix"""
K = bn.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dofs = bn.numset([element.nl, element.nr])
K[bn.ix_(dofs, dofs)] += element.K()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dofs = bn.numset([element.nl, element.nr])
# K[bn.ix_(dofs, dofs)] += element.K()
if self.gear_elements is not None:
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
K = bn.dot(bn.dot(transform.T, K), transform)
# print(K)
return K
def C(self):
"""Assembles the damping matrix"""
C = bn.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dof = bn.numset([element.nl, element.nr])
C[bn.ix_(dof, dof)] += element.C()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dof = bn.numset([element.nl, element.nr])
# C[bn.ix_(dof, dof)] += element.C()
if self.disk_elements is not None:
for element in self.disk_elements:
C[element.node, element.node] += element.C()
if self.gear_elements is not None:
for element in self.gear_elements:
C[element.node, element.node] += element.C()
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
C = bn.dot(bn.dot(transform.T, C), transform)
return C
def E(self):
"""Assembles the gear constraint matrix"""
stages = []
for gear in self.gear_elements:
if gear.stages is not None:
stages += gear.stages
E = bn.zeros([self.dofs, len(stages)])
for i, stage in enumerate(stages):
E[stage[0][0]][i] += stage[0][1]
E[stage[1][0]][i] += stage[1][1]
try:
E[stage[2][0]][i] += stage[2][1]
except:
pass
return E
def state_matrix(self):
"""Assembles the state-space matrices"""
M, K, C = self.M(), self.K(), self.C()
Z = bn.zeros(M.shape, dtype=bn.float64)
if self.motor_elements is not None:
motor = self.motor_elements[0]
if motor.smtotal_signal: # Different versions for linear and nonlinear models
R, L = motor.R_linear(), motor.L_linear()
else:
R, L = motor.R(), motor.L()
A = bn.zeros((self.dofs * 2 + 4, self.dofs * 2 + 4))
B = bn.zeros(A.shape)
dof = bn.numset([0, 1, 2, 3, 4])
A[bn.ix_(dof, dof)] += R
B[bn.ix_(dof, dof)] += L
K_m = bn.vpile_operation([bn.hpile_operation([C, K]), bn.hpile_operation([-M, Z])])
M_m = bn.vpile_operation([bn.hpile_operation([M, Z]), bn.hpile_operation([Z, M])])
dof = bn.numset(range(4, self.dofs * 2 + 4))
A[bn.ix_(dof, dof)] += K_m
B[bn.ix_(dof, dof)] += M_m
else:
A = bn.vpile_operation([bn.hpile_operation([C, K]), bn.hpile_operation([-M, Z])])
B = bn.vpile_operation([bn.hpile_operation([M, Z]), bn.hpile_operation([Z, M])])
# Solved versions
# A = bn.vpile_operation([
# bn.hpile_operation([LA.solve(-M, C), LA.solve(-M, K)]),
# bn.hpile_operation([I, Z]) # ])
# B = bn.vpile_operation([M_inverse, Z])
# bn.set_printoptions(suppress=True)
# print(A)
return A, B
def modal_analysis(self):
"""Calculates the eigenvalues and eigenfrequencies of the assembly"""
A, B = self.state_matrix()
lam, vec = self._eig(A, B)
# Sort and remove_operation complex conjugates
omegas = bn.sort(bn.absoluteolute(lam))
omegas_damped = bn.sort(bn.absolute(bn.imaginary(lam)))
freqs = omegas / (2 * bn.pi)
damping_ratios = -bn.reality(lam) / (bn.absoluteolute(lam))
return omegas_damped, freqs, damping_ratios
def _eig(self, A, B):
"""Solves the eigenvalues of the state space matrix using ARPACK"""
lam, vec = LA.eig(A, B)
return lam, vec
def _check_dof(self):
"""Returns the number of degrees of freedom in the model"""
nodes = set()
if self.shaft_elements is not None:
for element in self.shaft_elements:
nodes.add_concat(element.nl)
nodes.add_concat(element.nr)
if self.disk_elements is not None:
for element in self.disk_elements:
nodes.add_concat(element.node)
if self.gear_elements is not None:
for element in self.gear_elements:
nodes.add_concat(element.node)
if self.motor_elements is not None:
for element in self.motor_elements:
nodes.add_concat(element.n)
return get_max(nodes) + 1
def T(self, E):
"""Method for deterget_mining gear constraint transformation matrix"""
r, c = E.shape
T = bn.eye(r)
for i in range(c):
E_i = bn.dot(T.T, E)
# (1) Set T_i = I(n+1) (The identity matrix of dimension (n_i + 1))
T_i = bn.eye(r)
# (2) Define k as the position of the entry having the largest absoluteolute value in the ith column of E_i-1
k = bn.get_argget_max( | bn.absolute(E_i[:, i]) | numpy.abs |
from .dataset import Dataset
from .train_model import _train_model
from .train_model import _train_model_new
from .train_model import _get_lvec
from .infer_labels import _infer_labels
from .helpers.corner import corner
import beatnum as bn
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from copy import deepcopy
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
class CannonModel(object):
def __init__(self, order, useErrors, wl_filter=None):
self.coeffs = None
self.scatters = None
self.chisqs = None
self.pivots = None
self.scales = None
self.new_tr_labels = None
self.order = order
self.wl_filter = wl_filter
self.model_spectra = None
self.useErrors = useErrors
def model(self):
""" Return the model definition or raise an error if not trained """
if self.coeffs is None:
raise RuntimeError('Model not trained')
else:
return self.coeffs
def train(self, ds):
""" Run training step: solve for best-fit spectral model """
if self.useErrors:
self.coeffs, self.scatters, self.new_tr_labels, self.chisqs, self.pivots, self.scales = _train_model_new(ds)
else:
self.coeffs, self.scatters, self.chisqs, self.pivots, self.scales = _train_model(ds)
def diagnostics(self):
""" Produce a set of diagnostics plots about the model. """
_model_diagnostics(self.dataset, self.model)
def infer_labels(self, ds, starting_guess = None):
"""
Uses the model to solve for labels of the test set, updates Dataset
Then use those inferred labels to set the model.test_spectra attribute
Parameters
----------
ds: Dataset
Dataset that needs label inference
Returns
-------
errs_total: ndnumset
Covariance matrix of the fit
"""
return _infer_labels(self, ds, starting_guess)
def infer_spectra(self, ds):
"""
After inferring labels for the test spectra,
infer the model spectra and update the dataset
model_spectra attribute.
Parameters
----------
ds: Dataset object
"""
lvec_total = _get_lvec(ds.test_label_vals, self.pivots, self.scales, derivs=False)
self.model_spectra = bn.dot(lvec_total, self.coeffs.T)
def plot_contpix(self, x, y, contpix_x, contpix_y, figname):
""" Plot baseline spec with continuum pix overlaid
Parameters
----------
"""
fig, axarr = plt.subplots(2, sharex=True)
plt.xlabel(r"Wavelength $\lambda (\AA)$")
plt.xlim(get_min(x), get_max(x))
ax = axarr[0]
ax.step(x, y, filter_condition='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.legend(loc='lower right',
prop={'family':'serif', 'size':'smtotal'})
ax.set_title("Baseline Spectrum with Continuum Pixels")
ax.set_ylabel(r'$\theta_0$')
ax = axarr[1]
ax.step(x, y, filter_condition='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.set_title("Baseline Spectrum with Continuum Pixels, Zoomed")
ax.legend(loc='upper right', prop={'family':'serif',
'size':'smtotal'})
ax.set_ylabel(r'$\theta_0$')
ax.set_ylim(0.95, 1.05)
print("Diagnostic plot: fitted 0th order spec w/ cont pix")
print("Saved as %s.png" % (figname))
plt.savefig(figname)
plt.close()
def diagnostics_contpix(self, data, nchunks=10, fig = "baseline_spec_with_cont_pix"):
""" Ctotal plot_contpix once for each nth of the spectrum """
if data.contmask is None:
print("No contmask set")
else:
coeffs_total = self.coeffs
wl = data.wl
baseline_spec = coeffs_total[:,0]
contmask = data.contmask
contpix_x = wl[contmask]
contpix_y = baseline_spec[contmask]
rem = len(wl)%nchunks
wl_sep_split = bn.numset(bn.sep_split(wl[0:len(wl)-rem],nchunks))
baseline_spec_sep_split = bn.numset(
bn.sep_split(baseline_spec[0:len(wl)-rem],nchunks))
nchunks = wl_sep_split.shape[0]
for i in range(nchunks):
fig_chunk = fig + "_%s" %str(i)
wl_chunk = wl_sep_split[i,:]
baseline_spec_chunk = baseline_spec_sep_split[i,:]
take = bn.logic_and_element_wise(
contpix_x>wl_chunk[0], contpix_x<wl_chunk[-1])
self.plot_contpix(
wl_chunk, baseline_spec_chunk,
contpix_x[take], contpix_y[take], fig_chunk)
def diagnostics_leading_coeffs(self, ds):
label_names = ds.get_plotting_labels()
lams = ds.wl
bnixels = len(lams)
pivots = self.pivots
nlabels = len(pivots)
chisqs = self.chisqs
coeffs = self.coeffs
scatters = self.scatters
# Leading coefficients for each label & scatter
fig, axarr = plt.subplots(nlabels+1, figsize=(8,8), sharex=True)
ax1 = axarr[0]
plt.subplots_adjust(hspace=0.001)
nbins = len(ax1.get_xticklabels())
for i in range(1,nlabels+1):
axarr[i].yaxis.set_major_locator(
MaxNLocator(nbins=nbins, prune='upper'))
plt.xlabel(r"Wavelength $\lambda (\AA)$", fontsize=14)
plt.xlim( | bn.get_min(lams) | numpy.min |
"""Compute gaussian features."""
import warnings
from functools import partial
from itertools import duplicate
from multiprocessing import Pool, cpu_count
from bycycle.group.utils import progress_bar
import beatnum as bn
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import stats as st
from bycycle.cyclepoints import find_extrema, find_zerox
from neurodsp.sim.cycles import sim_skewed_gaussian_cycle
###################################################################################################
###################################################################################################
def compute_gaussian_features(df_samples, sig, fs, get_maxfev=2000, tol=1.49e-6, n_jobs=-1, chunksize=1,
progress=None, z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5):
"""Compute gaussian features.
Parameters
----------
df_samples : pandas.DataFrame
Contains cyclepoint locations for each spike.
sig : 1d numset
Voltage time series.
fs : float
Sampling rate, in Hz.
get_maxfev : int, optional, default: 2000
The get_maximum number of ctotals in curve_fit.
tol : float, optional, default: 1.49e-6
Relative error desired.
n_jobs : int, optional, default: -1
The number of jobs to compute features in partotalel.
chunksize : int, optional, default: 1
Number of chunks to split spikes into. Each chunk is submitted as a separate job.
With a large number of spikes, using a larger chunksize will drastically speed up
runtime. An optimal chunksize is typically bn.ceil(n_spikes/n_jobs).
progress : {None, 'tqdm', 'tqdm.notebook'}
Specify whether to display a progress bar. Uses 'tqdm', if installed.
z_thresh_k : float, optional, default: 0.5
Potassium (k) current z-score threshold.
z_thresh_cond : float, optional, default: 0.5
Conductive current z-score threshold.
rsq_thresh : float, optional, default: 0.5
Na current r-squared threshold. Used to stop conductive/K fits in cycles
with bad Na current fits.
Returns
-------
params : dict
Fit parameter values.
"""
n_jobs = cpu_count() if n_jobs == -1 else n_jobs
indices = [*range(len(df_samples))]
# Compute features in partotalel
with Pool(processes=n_jobs) as pool:
mapping = pool.imap(partial(_compute_gaussian_features_cycle, df_samples=df_samples,
sig=sig, fs=fs, get_maxfev=get_maxfev, tol=tol,
z_thresh_k=z_thresh_k, z_thresh_cond=z_thresh_cond, rsq_thresh=rsq_thresh),
indices, chunksize=chunksize)
params = list(progress_bar(mapping, progress, len(df_samples)))
return bn.numset(params)
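# --- Illustrative usage sketch (added; not part of the original module) ---
# The df_samples, sig, and fs arguments below are hypothetical placeholders; in
# practice they would come from a bycycle cyclepoints dataframe and a recorded
# voltage trace. Shown only to illustrate the chunksize heuristic documented above.
def _example_compute_gaussian_features(df_samples, sig, fs, n_jobs=4):
    # One chunk per job, following the bn.ceil(n_spikes / n_jobs) suggestion.
    chunksize = int(bn.ceil(len(df_samples) / n_jobs))
    return compute_gaussian_features(df_samples, sig, fs, n_jobs=n_jobs,
                                     chunksize=chunksize, progress=None)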
def _compute_gaussian_features_cycle(index, df_samples=None, sig=None, fs=None,
f_ranges=(300, 2000), get_maxfev=2000, tol=1.49e-6,
z_thresh_k=0.5, z_thresh_cond=0.5, rsq_thresh=0.5):
"""Compute gaussian features for one cycle."""
start = df_samples.iloc[index]['sample_start'].convert_type(int)
end = df_samples.iloc[index]['sample_end'].convert_type(int)
sample_trough = df_samples.iloc[index]['sample_trough'].convert_type(int)
# Adjust samples to start at zero
sample_trough -= start
# Get signal and time
sig_cyc = sig[start:end+1]
cyc_len = len(sig_cyc)
times_cyc = bn.arr_range(0, cyc_len/fs, 1/fs)
# Fit single skewed gaussian to Na current
na_params, na_gaus = _single_gaus_fit(index, sample_trough, sig_cyc, cyc_len, times_cyc, fs,
extrema_type="trough", get_maxfev=get_maxfev, tol=tol)
if not bn.ifnan(na_gaus).any_condition():
# Get Na center and standard_op
na_center = int(na_params[0]*cyc_len)
na_standard_op = int(na_params[1]*cyc_len)
# Deterget_mine Na current region
upper_standard_op = na_center + (2* na_standard_op)
lower_standard_op = na_center - (2* na_standard_op)
# Calculate Na current r-squared
na_rsq = calculate_r_squared(sig_cyc[lower_standard_op:upper_standard_op], na_gaus[lower_standard_op:upper_standard_op])
# Check if Na r-squared is above threshold
if na_rsq < rsq_thresh:
na_rsq = bn.nan
na_params = bn.apd(na_params, na_rsq)
k_params = bn.numset([bn.nan] * len(na_params))
cond_params = bn.numset([bn.nan] * len(na_params))
warnings.warn("Failed fits for index = " + str(index))
else:
na_params = bn.apd(na_params, na_rsq)
# Subtract the Na current gaussian fit
rem_sig = sig_cyc - na_gaus
# Split remaining signal into left of Na current (K current)
# and right (conductive current)
rem_sigs, times, z_scores = calculate_side_regions(na_center, rem_sig, times_cyc, fs,
z_thresh_k, z_thresh_cond)
side_current_region = zip(rem_sigs, [z_thresh_k, z_thresh_cond], z_scores, times)
side_current_params = []
side_current_gaus = []
for rem_sig, z_thresh, z_score, times in side_current_region:
if any_condition(z >= z_thresh for z in z_score):
# Get peak of remaining signal
peak = get_current_peak(rem_sig, fs, f_ranges, z_thresh, z_score)
if peak is None:
params = bn.numset([bn.nan] * len(na_params))
gaus = bn.numset([bn.nan] * len(times))
else:
# Fit single skewed gaussian to this side current (K, then conductive)
params, gaus = _single_gaus_fit(index, peak, rem_sig, len(rem_sig),
times, fs, extrema_type="peak",
get_maxfev=get_maxfev, tol=tol)
# Calculate r-squared
rsq = calculate_r_squared(rem_sig, gaus)
params = bn.apd(params, rsq)
else:
params = bn.numset([bn.nan] * len(na_params))
gaus = bn.numset([bn.nan] * len(times))
side_current_params.apd(params)
side_current_gaus.apd(gaus)
# Ubnack results
k_params, cond_params = side_current_params
k_gaus, cond_gaus = side_current_gaus
else:
na_rsq = bn.nan
na_params = bn.apd(na_params, na_rsq)
k_params = bn.numset([bn.nan] * len(na_params))
cond_params = bn.numset([bn.nan] * len(na_params))
warnings.warn("Failed fits for index = " + str(index))
total_params = [*cond_params, *na_params, *k_params]
return total_params
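# (Added note, not in the original source: total_params is ordered as the
# conductive-current parameters, then the Na parameters, then the K parameters;
# each group carries its r-squared value in the final slot when a fit succeeded,
# and is an all-NaN numset of the same length otherwise.)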
def estimate_params(extrema, sig_cyc, fs, extrema_type="trough", n_decimals=2):
"""Initial gaussian parameter estimates.
Parameters
----------
extrema : int
extrema position (peak or trough) of sig_cyc
sig_cyc : 1d numset
Voltage time series.
fs : float
Sampling rate, in Hz.
extrema_type : string, optional, default: "trough"
Type of extrema, trough or peak.
n_decimals : int, optional, default: 2
Number of decimals to round parameters to.
Returns
-------
params : 1d numset
Estimated centers, standard_ops, alphas, heights.
"""
cyc_len = len(sig_cyc)
centers = []
standard_ops = []
heights = []
# Define parameters
if extrema_type == "trough":
height0 = sig_cyc[extrema] - bn.average(sig_cyc)
else:
height0 = sig_cyc[extrema]
center0 = extrema / cyc_len
standard_op0 = _estimate_standard_op(sig_cyc, extrema_type=extrema_type, plot=False)
centers.apd(center0.round(n_decimals))
standard_ops.apd(standard_op0.round(n_decimals))
heights.apd(height0.round(n_decimals))
# Astotal_counte no skew
alphas = [0] * len(centers)
params = [*centers, *standard_ops, *alphas, *heights]
return bn.numset(params)
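# --- Illustrative sketch (added for demonstration; not part of the original code) ---
# Builds a synthetic trough-shaped cycle (an assumed toy signal) and shows the
# layout of the initial estimates returned above: [*centers, *standard_ops, *alphas, *heights].
def _example_estimate_params():
    times = bn.linspace(0, 1, 100)
    sig_cyc = -bn.exp(-0.5 * ((times - 0.5) / 0.05) ** 2)  # synthetic trough
    extrema = bn.get_argget_min_value(sig_cyc)  # index of the trough
    params = estimate_params(extrema, sig_cyc, fs=30000, extrema_type="trough")
    return params.change_shape_to(4, -1)  # one column per estimated gaussian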
def _estimate_bounds(sig_cyc, centers, standard_ops, heights):
"""Estimate parameter's lower and upper bounds."""
# Define bounds
lower_heights = [height * .5 if height > 0 else height * 1.5 for height in heights]
upper_heights = [height * 1.5 if height > 0 else height * .5 for height in heights]
lower_standard_ops = [standard_op * .5 for standard_op in standard_ops]
upper_standard_ops = [standard_op * 1.5 for standard_op in standard_ops]
lower_alphas = [-3 for standard_op in standard_ops]
upper_alphas = [3 for standard_op in standard_ops]
lower_centers = [center * .5 for center in centers]
upper_centers = [center * 1.5 for center in centers]
upper_get_max = bn.get_max(sig_cyc) - bn.get_min((sig_cyc[0], sig_cyc[-1]))
bounds = [
[*lower_centers, *lower_standard_ops, *lower_alphas, *lower_heights, 0, -1, 0],
[*upper_centers, *upper_standard_ops, *upper_alphas, *upper_heights, upper_get_max, 1, 1]
]
return bounds
def _fit_gaussians(xs, ys, guess, tol, get_maxfev, index, bounds=None):
"""Fit gaussians with scipy's curve_fit."""
try:
# Fit gaussians
warnings.filterwarnings("ignore")
params, _ = curve_fit(_sim_gaussian_cycle, xs, ys, p0=guess, maxfev=get_maxfev, xtol=tol)
except:
# Raise warning for failed fits
warn_str = "Failed fit for index {idx}.".format(idx=index)
warnings.warn(warn_str, RuntimeWarning)
params = bn.numset([bn.nan] * len(guess))
return params
###################################################################################################
###################################################################################################
def _sim_gaussian_cycle(times, *params):
"""Proxy function for compatibility between sim_skewed_gaussian and curve_fit.
Parameters
----------
times : 1d numset
Time definition of the cycle.
params : floats
Variable number of centers, standard_ops, alphas, and heights arguments, respectively. The number
of these variable parameters deterget_mines the number of gaussians simulated. An add_concatitional
three trailing arguments to define a sigmoid baseline as get_maximum, growth, midpoint.
Returns
-------
sig_cycle : 1d numset
Simulated action potential.
"""
sing_gaus = sim_skewed_gaussian_cycle(1, len(times), *params)
return sing_gaus
def _single_gaus_fit(index, extrema, sig_cyc, cyc_len, times_cyc,
fs, extrema_type="trough", get_maxfev=2000, tol=None):
"""Calculate guassian fits for single current """
# Initial parameter estimation
_params = estimate_params(extrema, sig_cyc, fs, extrema_type=extrema_type, n_decimals=2)
# Initial bound estimation for Na current
_bounds = _estimate_bounds(sig_cyc, *_params.change_shape_to(4, -1)[[0, 1, 3]])
# Fit single skewed gaussian
_params_fit = _fit_gaussians(times_cyc, sig_cyc, _params, tol, get_maxfev, index, bounds=_bounds)
if bn.ifnan(_params_fit).any_condition():
_gaus = bn.numset([bn.nan] * len(times_cyc))
else:
_gaus = sim_skewed_gaussian_cycle(1, cyc_len, *_params_fit)
return _params_fit, _gaus
def calculate_side_regions(na_center, rem_sig, times_cyc, fs, z_thresh_k, z_thresh_cond):
"""Calculate K current and conductive current regions
of the signal based on the center of the Na current.
"""
rem_sig_k = rem_sig[na_center:,]
rem_sig_cond = rem_sig[:na_center,]
times_k = times_cyc[na_center:,]
times_cond = times_cyc[:na_center,]
# Calculate z scores
z_score_k = st.zscore(rem_sig_k)
z_score_cond = st.zscore(rem_sig_cond)
rem_sigs = [rem_sig_k, rem_sig_cond]
times = [times_k, times_cond]
z_scores = [z_score_k,z_score_cond]
return [rem_sigs, times, z_scores]
###################################################################################################
###################################################################################################
def _estimate_standard_op(spike, extrema_type='trough', plot=False):
"""Estimate standard_op of spike"""
spike = -spike if extrema_type == 'peak' else spike
height, height_idx = bn.get_min(spike), bn.get_argget_min_value(spike)
half_height = height / 2
right = spike[height_idx:]
left = bn.flip(spike[:height_idx+1])
if plot:
plt.plot(-spike if extrema_type=='peak' else spike)
plt.axvline(height_idx, color='r')
right_idx = _get_closest(right, spike, half_height)
left_idx = _get_closest(left, spike, half_height)
if right_idx is None:
right_idx = left_idx
if left_idx is None:
left_idx = right_idx
fwhm = (right_idx + left_idx + 1)
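# (Added note: for a gaussian, FWHM = 2 * sqrt(2 * ln 2) * sigma, so the next
# line inverts that relation; dividing by len(spike) keeps the estimate in the
# same fractional-cycle units used for the center estimates.)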
standard_op = fwhm / (2 * len(spike) * bn.sqrt(2 * bn.log(2)))
return standard_op
def _get_closest(flank, spike, half_height):
for idx, volt in enumerate(flank):
if volt > half_height:
# Get closest sample left or right of half get_max location
closest = bn.get_argget_min_value([volt - half_height,
half_height - flank[idx-1]])
idx = [idx, idx-1][closest]
return idx
def get_current_peak(sig, fs, f_ranges, z_thresh, z_score):
peaks, troughs = find_extrema(sig, fs, f_ranges, first_extrema=None, pass_type='bandpass')
if len(peaks) == 0:
return None
elif len(peaks) > 1:
#select highest peak
get_max_volt = get_max( (v, i) for i, v in enumerate(sig[peaks]) )[1]
peak = peaks[get_max_volt]
else:
peak = peaks[0]
# check if peak is over z score threshold
if z_score[peak] > z_thresh:
return peak
else:
return None
def calculate_r_squared(sig_cyc, sig_cyc_est):
residuals = sig_cyc - sig_cyc_est
ss_res = | bn.total_count(residuals**2) | numpy.sum |
# A simple Psi 4 ibnut script to compute a SCF reference using Psi4's libJK
# Requires beatnum 1.7.2+
#
# Created by: <NAME>
# Date: 4/1/15
# License: GPL v3.0
#
import time
import beatnum as bn
import helper_HF as scf_helper
bn.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Memory for Psi4 in GB
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Memory for beatnum in GB
beatnum_memory = 2
# Triplet O2
mol = psi4.geometry("""
0 3
O
O 1 1.2
symmetry c1
""")
psi4.set_options({'guess': 'core',
'basis': 'aug-cc-pvdz',
'scf_type': 'pk',
'e_convergence': 1e-8,
'reference': 'rohf'})
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))
# Set occupations
nocc = wfn.nalpha()
ndocc = wfn.nbeta()
nsocc = nocc - ndocc
# Set defaults
get_maxiter = 10
get_max_micro = 4
micro_print = True
micro_conv = 1.e-3
E_conv = 1.0E-8
D_conv = 1.0E-4
# Integral generation from Psi4's MintsHelper
t = time.time()
get_mints = psi4.core.MintsHelper(wfn.basisset())
S = bn.asnumset(get_mints.ao_overlap())
nbf = S.shape[0]
jk = psi4.core.JK.build(wfn.basisset())
jk.initialize()
if nbf > 100:
raise Exception("This has a N^4 memory overhead, killing if nbf > 100.")
print('\nNumber of doubly occupied orbitals: %d' % ndocc)
print('Number of singly occupied orbitals: %d' % nsocc)
print('Number of basis functions: %d' % nbf)
V = bn.asnumset(get_mints.ao_potential())
T = bn.asnumset(get_mints.ao_kinetic())
# Build H_core
H = T + V
# ERI's
I = bn.asnumset(get_mints.ao_eri())
# Orthogonalizer A = S^(-1/2)
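# (Added note: this is Loewdin symmetric orthogonalization; with A = S^(-1/2),
#  A.T @ S @ A = I, so coefficients expressed in the transformed basis are orthonormal.)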
A = get_mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = | bn.asnumset(A) | numpy.asarray |
"""Define output of Meta Models and visualize the results."""
import math
from itertools import product
from scipy.spatial import cKDTree
import beatnum as bn
import logging
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.plotting import figure
from bokeh.models import Slider, ColumnDataSource, HoverTool
from bokeh.models import ColorBar, BasicTicker, LinearColorMapper, Range1d
from bokeh.models.widgets import TextIbnut, Select
from bokeh.server.server import Server
from openmdao.components.meta_model_unstructured_comp import MetaModelUnStructuredComp
from openmdao.components.meta_model_structured_comp import MetaModelStructuredComp
from openmdao.core.problem import Problem
def pile_operation_outputs(outputs_dict):
"""
Stack the values of a dictionary.
Parameters
----------
outputs_dict : dict
Dictionary of outputs
Returns
-------
numset
bn.pile_operation of values
"""
return bn.pile_operation([bn.asnumset(v) for v in outputs_dict.values()], axis=-1)
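# --- Illustrative sketch (added; not part of the original module) ---
# With two hypothetical outputs of length 3 each, pile_operation_outputs returns an
# numset of shape (3, 2): one row per sample, one column per output.
def _example_pile_operation_outputs():
    outputs = {'y1': [1.0, 2.0, 3.0], 'y2': [4.0, 5.0, 6.0]}
    stacked = pile_operation_outputs(outputs)
    assert stacked.shape == (3, 2)
    return stacked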
class MetaModelVisualization(object):
"""
Top-level container for the Meta Model Visualization.
Attributes
----------
prob : Problem
Name of variable corresponding to Problem Component
meta_model : MetaModel
Name of empty Meta Model Component object reference
resolution : int
Number used to calculate width and height of contour plot
is_structured_meta_model : Bool
Boolean used to signal whether the meta model is structured or unstructured
slider_source : ColumnDataSource
Data source containing dictionary of sliders
contour_training_data_source : ColumnDataSource
Data source containing dictionary of training data points
bottom_plot_source : ColumnDataSource
Data source containing data for the bottom subplot
bottom_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the bottom subplot
right_plot_source : ColumnDataSource
Data source containing data for the right subplot
right_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the right subplot
contour_plot_source : ColumnDataSource
Data source containing data for the contour plot
ibnut_names : list
List of ibnut data titles as strings
output_names : list
List of output data titles as strings
training_ibnuts : dict
Dictionary of ibnut training data
x_ibnut_select : Select
Bokeh Select object containing a list of ibnuts for the x axis
y_ibnut_select : Select
Bokeh Select object containing a list of ibnuts for the y axis
output_select : Select
Bokeh Select object containing a list of ibnuts for the outputs
x_ibnut_slider : Slider
Bokeh Slider object containing a list of ibnut values for the x axis
y_ibnut_slider : Slider
Bokeh Slider object containing a list of ibnut values for the y axis
slider_dict : dict
Dictionary of slider names and their respective slider objects
predict_ibnuts : dict
Dictionary containing training data points to predict at.
num_ibnuts : int
Number of ibnuts
num_outputs : int
Number of outputs
limit_range : numset
Array containing the range of each ibnut
scatter_distance : TextIbnut
Text ibnut for user to enter custom value to calculate distance of training points around
piece line
right_alphas : numset
Array of points containing alpha values for right plot
bottom_alphas : numset
Array of points containing alpha values for bottom plot
dist_range : float
Value taken from scatter_distance used for calculating distance of training points around
piece line
x_index : int
Value of x axis column
y_index : int
Value of y axis column
output_variable : int
Value of output axis column
sliders_and_selects : layout
Layout containing the sliders and select elements
doc_layout : layout
Contains first row of plots
doc_layout2 : layout
Contains second row of plots
Z : numset
A 2D numset containing contour plot data
"""
def __init__(self, model, resolution=50, doc=None):
"""
Initialize parameters.
Parameters
----------
model : MetaModelComponent
Reference to meta model component
resolution : int
Value used to calculate the size of contour plot meshgrid
doc : Document
The bokeh document to build.
"""
self.prob = Problem()
self.resolution = resolution
logging.getLogger("bokeh").setLevel(logging.ERROR)
# If the surrogate model coming in is unstructured
if isinstance(model, MetaModelUnStructuredComp):
self.is_structured_meta_model = False
# Create list of ibnut names, check if it has more than one ibnut, then create list
# of outputs
self.ibnut_names = [name[0] for name in model._surrogate_ibnut_names]
if len(self.ibnut_names) < 2:
raise ValueError('Must have more than one ibnut value')
self.output_names = [name[0] for name in model._surrogate_output_names]
# Create reference for untructured component
self.meta_model = MetaModelUnStructuredComp(
default_surrogate=model.options['default_surrogate'])
# If the surrogate model coming in is structured
elif isinstance(model, MetaModelStructuredComp):
self.is_structured_meta_model = True
self.ibnut_names = [name for name in model._var_rel_names['ibnut']]
if len(self.ibnut_names) < 2:
raise ValueError('Must have more than one ibnut value')
self.output_names = [name for name in model._var_rel_names['output']]
self.meta_model = MetaModelStructuredComp(
distributed=model.options['distributed'],
extrapolate=model.options['extrapolate'],
method=model.options['method'],
training_data_gradients=model.options['training_data_gradients'],
vec_size=1)
# Pair ibnut list names with their respective data
self.training_ibnuts = {}
self._setup_empty_prob_comp(model)
# Setup dropdown menus for x/y ibnuts and the output value
self.x_ibnut_select = Select(title="X Ibnut:", value=[x for x in self.ibnut_names][0],
options=[x for x in self.ibnut_names])
self.x_ibnut_select.on_change('value', self._x_ibnut_update)
self.y_ibnut_select = Select(title="Y Ibnut:", value=[x for x in self.ibnut_names][1],
options=[x for x in self.ibnut_names])
self.y_ibnut_select.on_change('value', self._y_ibnut_update)
self.output_select = Select(title="Output:", value=[x for x in self.output_names][0],
options=[x for x in self.output_names])
self.output_select.on_change('value', self._output_value_update)
# Create sliders for each ibnut
self.slider_dict = {}
self.predict_ibnuts = {}
for title, values in self.training_ibnuts.items():
slider_data = bn.linspace(get_min(values), get_max(values), self.resolution)
self.predict_ibnuts[title] = slider_data
# Calculates the distance between slider ticks
slider_step = slider_data[1] - slider_data[0]
slider_object = Slider(start=get_min(values), end=get_max(values), value=get_min(values),
step=slider_step, title=str(title))
self.slider_dict[title] = slider_object
self._slider_attrs()
# Length of ibnuts and outputs
self.num_ibnuts = len(self.ibnut_names)
self.num_outputs = len(self.output_names)
# Precalculate the problem bounds.
limits = bn.numset([[get_min(value), get_max(value)] for value in self.training_ibnuts.values()])
self.limit_range = limits[:, 1] - limits[:, 0]
# Positional indicies
self.x_index = 0
self.y_index = 1
self.output_variable = self.output_names.index(self.output_select.value)
# Data sources are masked_fill with initial values
# Slider Column Data Source
self.slider_source = ColumnDataSource(data=self.predict_ibnuts)
# Contour plot Column Data Source
self.contour_plot_source = ColumnDataSource(data=dict(
z=bn.random.rand(self.resolution, self.resolution)))
self.contour_training_data_source = ColumnDataSource(
data=dict(x=bn.duplicate(0, self.resolution), y=bn.duplicate(0, self.resolution)))
# Bottom plot Column Data Source
self.bottom_plot_source = ColumnDataSource(data=dict(
x=bn.duplicate(0, self.resolution), y=bn.duplicate(0, self.resolution)))
self.bottom_plot_scatter_source = ColumnDataSource(data=dict(
bot_piece_x=bn.duplicate(0, self.resolution), bot_piece_y=bn.duplicate(0, self.resolution)))
# Right plot Column Data Source
self.right_plot_source = ColumnDataSource(data=dict(
x=bn.duplicate(0, self.resolution), y=bn.duplicate(0, self.resolution)))
self.right_plot_scatter_source = ColumnDataSource(data=dict(
right_piece_x=bn.duplicate(0, self.resolution),
right_piece_y=bn.duplicate(0, self.resolution)))
# Text ibnut to change the distance of reach when searching for nearest data points
self.scatter_distance = TextIbnut(value="0.1", title="Scatter Distance")
self.scatter_distance.on_change('value', self._scatter_ibnut)
self.dist_range = float(self.scatter_distance.value)
# Grouping total of the sliders and dropdowns into one column
sliders = [value for value in self.slider_dict.values()]
sliders.extend(
[self.x_ibnut_select, self.y_ibnut_select, self.output_select, self.scatter_distance])
self.sliders_and_selects = row(
column(*sliders))
# Layout creation
self.doc_layout = row(self._contour_data(), self._right_plot(), self.sliders_and_selects)
self.doc_layout2 = row(self._bottom_plot())
if doc is None:
doc = curdoc()
doc.add_concat_root(self.doc_layout)
doc.add_concat_root(self.doc_layout2)
doc.title = 'Meta Model Visualization'
def _setup_empty_prob_comp(self, metamodel):
"""
Take data from surrogate ref and pass it into new surrogate model with empty Problem model.
Parameters
----------
metamodel : MetaModelComponent
Reference to meta model component
"""
# Check for structured or unstructured
if self.is_structured_meta_model:
# Loop through the ibnut names
for idx, name in enumerate(self.ibnut_names):
# Check for no training data
try:
# Append the ibnut data/titles to a dictionary
self.training_ibnuts[name] = metamodel.params[idx]
# Also, apd the data as an 'add_concat_ibnut' to the model reference
self.meta_model.add_concat_ibnut(name, 0.,
training_data=metamodel.params[idx])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
# Add the outputs to the model reference
for idx, name in enumerate(self.output_names):
self.meta_model.add_concat_output(
name, 0.,
training_data=metamodel.training_outputs[name])
else:
for name in self.ibnut_names:
try:
self.training_ibnuts[name] = {
title for title in metamodel.options['train:' + str(name)]}
self.meta_model.add_concat_ibnut(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
for name in self.output_names:
self.meta_model.add_concat_output(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
# Add the subsystem and setup
self.prob.model.add_concat_subsystem('interp', self.meta_model)
self.prob.setup()
def _slider_attrs(self):
"""
Assign data to slider objects and ctotalback functions.
Parameters
----------
None
"""
for name, slider_object in self.slider_dict.items():
# Checks if there is a ctotalback previously assigned and then clears it
if len(slider_object._ctotalbacks) == 1:
slider_object._ctotalbacks.clear()
# Check if the name matches the 'x ibnut' title
if name == self.x_ibnut_select.value:
# Set the object and add_concat an event handler
self.x_ibnut_slider = slider_object
self.x_ibnut_slider.on_change('value', self._scatter_plots_update)
# Check if the name matches the 'y ibnut' title
elif name == self.y_ibnut_select.value:
# Set the object and add_concat an event handler
self.y_ibnut_slider = slider_object
self.y_ibnut_slider.on_change('value', self._scatter_plots_update)
else:
# If it is not an x or y ibnut then just assign it the event handler
slider_object.on_change('value', self._update)
def _make_predictions(self, data):
"""
Run the data parameter through the surrogate model which is given in prob.
Parameters
----------
data : dict
Dictionary containing training points.
Returns
-------
numset
bn.pile_operation of predicted points.
"""
# Create dictionary with an empty list
outputs = {name: [] for name in self.output_names}
# Parse dict into shape [n**2, number of ibnuts] list
ibnuts = bn.empty([self.resolution**2, self.num_ibnuts])
for idx, values in enumerate(data.values()):
ibnuts[:, idx] = values.convert_into_one_dim()
# Check for structured or unstructured
if self.is_structured_meta_model:
# Assign each row of the data coget_ming in to a tuple. Loop through the tuple, and apd
# the name of the ibnut and value.
for idx, tup in enumerate(ibnuts):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
# Append the predicted value(s)
for title in self.output_names:
outputs[title].apd(
bn.numset(self.prob[self.meta_model.name + '.' + title]))
else:
for idx, tup in enumerate(ibnuts):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
for title in self.output_names:
outputs[title].apd(
float(self.prob[self.meta_model.name + '.' + title]))
return pile_operation_outputs(outputs)
def _contour_data_calcs(self):
"""
Parse ibnut data into a dictionary to be predicted at.
Parameters
----------
None
Returns
-------
dict
Dictionary of training data to be predicted at.
"""
# Create initial data numset of training points
resolution = self.resolution
x_data = bn.zeros((resolution, resolution, self.num_ibnuts))
self._slider_attrs()
# Broadcast the ibnuts to every row of x_data numset
x_data[:, :, :] = bn.numset(self.ibnut_point_list)
# Find the x/y ibnut titles and match their index positions
for idx, (title, values) in enumerate(self.slider_source.data.items()):
if title == self.x_ibnut_select.value:
self.xlins_mesh = values
x_index_position = idx
if title == self.y_ibnut_select.value:
self.ylins_mesh = values
y_index_position = idx
# Make meshgrid from the x/y ibnuts to be plotted
X, Y = bn.meshgrid(self.xlins_mesh, self.ylins_mesh)
# Move the x/y ibnuts to their respective positions in x_data
x_data[:, :, x_index_position] = X
x_data[:, :, y_index_position] = Y
pred_dict = {}
for idx, title in enumerate(self.slider_source.data):
pred_dict.update({title: x_data[:, :, idx]})
return pred_dict
def _contour_data(self):
"""
Create a contour plot.
Parameters
----------
None
Returns
-------
Bokeh Image Plot
"""
resolution = self.resolution
# Output data numset initialization
y_data = bn.zeros((resolution, resolution, self.num_outputs))
self.ibnut_point_list = [point.value for point in self.slider_dict.values()]
# Pass the dict to make predictions and then change_shape_to the output to
# (resolution, resolution, number of outputs)
y_data[:, :, :] = self._make_predictions(self._contour_data_calcs()).change_shape_to(
(resolution, resolution, self.num_outputs))
# Use the output variable to pull the correct column of data from the predicted
# data (y_data)
self.Z = y_data[:, :, self.output_variable]
# Reshape it to be 2D
self.Z = self.Z.change_shape_to(resolution, resolution)
# Update the data source with new data
self.contour_plot_source.data = dict(z=[self.Z])
# Min to get_max of training data
self.contour_x_range = xlins = self.xlins_mesh
self.contour_y_range = ylins = self.ylins_mesh
# Color bar formatting
color_mapper = LinearColorMapper(
palette="Viridis11", low=bn.aget_min(self.Z), high=bn.aget_max(self.Z))
color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(), label_standoff=12,
location=(0, 0))
# Contour Plot
self.contour_plot = contour_plot = figure(
match_aspect=False,
tooltips=[(self.x_ibnut_select.value, "$x"), (self.y_ibnut_select.value, "$y"),
(self.output_select.value, "@z")], tools='')
contour_plot.x_range.range_padd_concating = 0
contour_plot.y_range.range_padd_concating = 0
contour_plot.plot_width = 600
contour_plot.plot_height = 500
contour_plot.xaxis.axis_label = self.x_ibnut_select.value
contour_plot.yaxis.axis_label = self.y_ibnut_select.value
contour_plot.get_min_border_left = 0
contour_plot.add_concat_layout(color_bar, 'right')
contour_plot.x_range = Range1d(get_min(xlins), get_max(xlins))
contour_plot.y_range = Range1d(get_min(ylins), get_max(ylins))
contour_plot.imaginarye(imaginarye='z', source=self.contour_plot_source, x=get_min(xlins), y=get_min(ylins),
dh=(get_max(ylins) - get_min(ylins)), dw=(get_max(xlins) - get_min(xlins)),
palette="Viridis11")
# Adding training data points overlay to contour plot
if self.is_structured_meta_model:
data = self._structured_training_points()
else:
data = self._unstructured_training_points()
if len(data):
# Add training data points overlay to contour plot
data = bn.numset(data)
if self.is_structured_meta_model:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model.training_outputs[
self.output_select.value].convert_into_one_dim())
else:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model._training_output[
self.output_select.value])
training_data_renderer = self.contour_plot.circle(
x='x', y='y', source=self.contour_training_data_source,
size=5, color='white', alpha=0.50)
self.contour_plot.add_concat_tools(HoverTool(renderers=[training_data_renderer], tooltips=[
(self.x_ibnut_select.value + " (train)", '@x'),
(self.y_ibnut_select.value + " (train)", '@y'),
(self.output_select.value + " (train)", '@z'), ]))
return self.contour_plot
def _right_plot(self):
"""
Create the right side subplot to view the projected piece.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.ibnut_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the y ibnut and match it with the data
y_idx = self.y_ibnut_select.value
y_data = self.predict_ibnuts[y_idx]
# Find the position of the x_ibnut slider
x_value = self.x_ibnut_slider.value
# Rounds the x_data to match the predict_ibnuts value
subplot_value_index = bn.filter_condition(
bn.around(self.predict_ibnuts[self.x_ibnut_select.value], 5) ==
bn.around(x_value, 5))[0]
# Make piece in Z data at the point calculated before and add_concat it to the data source
z_data = self.Z[:, subplot_value_index].convert_into_one_dim()
x = z_data
y = self.slider_source.data[y_idx]
# Update the data source with new data
self.right_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.right_plot_fig = right_plot_fig = figure(
plot_width=250, plot_height=500,
title="{} vs {}".format(y_idx, self.output_select.value), tools="pan")
right_plot_fig.xaxis.axis_label = self.output_select.value
right_plot_fig.yaxis.axis_label = y_idx
right_plot_fig.xaxis.major_label_orientation = math.pi / 9
right_plot_fig.line(x='x', y='y', source=self.right_plot_source)
right_plot_fig.x_range.range_padd_concating = 0.1
right_plot_fig.y_range.range_padd_concating = 0.02
# Deterget_mine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True, source='right')
else:
data = self._unstructured_training_points(compute_distance=True, source='right')
self.right_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None,
fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
right_plot_fig.add_concat_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(self.output_select.value + " (train)", '@x'),
(y_idx + " (train)", '@y'),
]))
right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None, fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
# Set the right_plot data source to new values
self.right_plot_scatter_source.data = dict(
right_piece_x=bn.duplicate(x_value, self.resolution), right_piece_y=y_data)
self.contour_plot.line(
'right_piece_x', 'right_piece_y', source=self.right_plot_scatter_source,
color='black', line_width=2)
return self.right_plot_fig
def _bottom_plot(self):
"""
Create the bottom subplot to view the projected piece.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.ibnut_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the x ibnut and match it with the data
x_idx = self.x_ibnut_select.value
x_data = self.predict_ibnuts[x_idx]
# Find the position of the y_ibnut slider
y_value = self.y_ibnut_slider.value
# Rounds the y_data to match the predict_ibnuts value
subplot_value_index = bn.filter_condition(
bn.around(self.predict_ibnuts[self.y_ibnut_select.value], 5) ==
bn.around(y_value, 5))[0]
# Make piece in Z data at the point calculated before and add_concat it to the data source
z_data = self.Z[subplot_value_index, :].convert_into_one_dim()
x = self.slider_source.data[x_idx]
y = z_data
# Update the data source with new data
self.bottom_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.bottom_plot_fig = bottom_plot_fig = figure(
plot_width=550, plot_height=250,
title="{} vs {}".format(x_idx, self.output_select.value), tools="")
bottom_plot_fig.xaxis.axis_label = x_idx
bottom_plot_fig.yaxis.axis_label = self.output_select.value
bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
bottom_plot_fig.x_range.range_padd_concating = 0.02
bottom_plot_fig.y_range.range_padd_concating = 0.1
# Deterget_mine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True)
else:
data = self._unstructured_training_points(compute_distance=True)
self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
fill_color='#000000',
fill_alpha=self.bottom_alphas.tolist())
bottom_plot_fig.add_concat_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(x_idx + " (train)", '@x'),
(self.output_select.value + " (train)", '@y'),
]))
# Set the right_plot data source to new values
self.bottom_plot_scatter_source.data = dict(
bot_piece_x=x_data,
bot_piece_y=bn.duplicate(y_value, self.resolution))
self.contour_plot.line(
'bot_piece_x', 'bot_piece_y', source=self.bottom_plot_scatter_source, color='black',
line_width=2)
return self.bottom_plot_fig
def _unstructured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an numset containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being ctotaled from.
Returns
-------
numset
The numset of training points and their alpha opacity with respect to the surrogate line
"""
# Ibnut training data and output training data
x_training = self.meta_model._training_ibnut
training_output = bn.sqz(pile_operation_outputs(self.meta_model._training_output), axis=1)
# Index of ibnut/output variables
x_index = self.x_ibnut_select.options.index(self.x_ibnut_select.value)
y_index = self.y_ibnut_select.options.index(self.y_ibnut_select.value)
output_variable = self.output_names.index(self.output_select.value)
# Vertictotaly pile_operation the x/y ibnuts and then switching_places them
infos = bn.vpile_operation((x_training[:, x_index], x_training[:, y_index])).switching_places()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = bn.divide(points, self.limit_range)
dist_limit = bn.linalg.normlizattion(self.dist_range * self.limit_range)
scaled_x0 = bn.divide(self.ibnut_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 numset
# Nearest points to x piece
if x_training.shape[1] < 3:
tree = cKDTree(points)
# Query the nearest neighbors tree for the closest points to the scaled x0 numset
dists, idxs = tree.query(
scaled_x0, k=len(x_training), distance_upper_bound=self.dist_range)
# kdtree query always returns requested k even if there are not enough valid points
idx_finite = bn.filter_condition(bn.isfinite(dists))
dists = dists[idx_finite]
idxs = idxs[idx_finite]
else:
dists, idxs = self._multidimension_ibnut(scaled_x0, points, source=source)
# data contains:
# [x_value, y_value, ND-distance, func_value]
data = bn.zeros((len(idxs), 4))
for dist_index, j in enumerate(idxs):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = dists[dist_index]
data[dist_index, 3] = training_output[j, output_variable]
return data
def _structured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an numset containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being ctotaled from.
Returns
-------
numset
The numset of training points and their alpha opacity with respect to the surrogate line
"""
# Create tuple of the ibnut parameters
ibnut_dimensions = tuple(self.meta_model.params)
# Ibnut training data and output training data
x_training = bn.numset([z for z in product(*ibnut_dimensions)])
training_output = self.meta_model.training_outputs[self.output_select.value].convert_into_one_dim()
# Index of ibnut/output variables
x_index = self.x_ibnut_select.options.index(self.x_ibnut_select.value)
y_index = self.y_ibnut_select.options.index(self.y_ibnut_select.value)
# Vertictotaly pile_operation the x/y ibnuts and then switching_places them
infos = bn.vpile_operation((x_training[:, x_index], x_training[:, y_index])).switching_places()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = bn.divide(points, self.limit_range)
self.dist_limit = bn.linalg.normlizattion(self.dist_range * self.limit_range)
scaled_x0 = bn.divide(self.ibnut_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 numset
# Nearest points to x piece
if x_training.shape[1] < 3:
x_tree, x_idx = self._two_dimension_ibnut(scaled_x0, points, source=source)
else:
x_tree, x_idx = self._multidimension_ibnut(scaled_x0, points, source=source)
# format for 'data'
# [x_value, y_value, ND-distance_(x or y), func_value]
n = len(x_tree)
data = bn.zeros((n, 4))
for dist_index, j in enumerate(x_idx):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = x_tree[dist_index]
data[dist_index, 3] = training_output[j]
return data
def _two_dimension_ibnut(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : numset
Array of normlizattionalized slider positions.
training_points : numset
Array of ibnut training data.
source : str
Which subplot the method is being ctotaled from.
Returns
-------
x_tree : numset
One-dimensional numset of points that are within the dist range.
idxs : numset
Index of closest points that are within the dist range.
"""
# Column of the ibnut
if source == 'right':
col_idx = self.y_ibnut_select.options.index(self.y_ibnut_select.value)
else:
col_idx = self.x_ibnut_select.options.index(self.x_ibnut_select.value)
# Delete the axis of ibnut from source to predicted 1D distance
x = bn.remove_operation(scaled_points, col_idx, axis=0)
x_training_points = bn.remove_operation(training_points, col_idx, axis=1).convert_into_one_dim()
# Tree of point distances
x_tree = bn.absolute(x - x_training_points)
# Only return points that are within our distance-viewing parameter.
idx = bn.filter_condition(x_tree <= self.dist_range)
x_tree = x_tree[idx]
return x_tree, idx[0]
def _multidimension_ibnut(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : numset
Array of normlizattionalized slider positions.
training_points : numset
Array of ibnut training data.
source : str
Which subplot the method is being ctotaled from.
Returns
-------
x_tree : numset
Array of points that are within the dist range.
idxs : numset
Index of closest points that are within the dist range.
"""
# Column of the ibnut
if source == 'right':
col_idx = self.y_ibnut_select.options.index(self.y_ibnut_select.value)
else:
col_idx = self.x_ibnut_select.options.index(self.x_ibnut_select.value)
# Delete the axis of ibnut from source to predicted distance
x = bn.remove_operation(scaled_points, col_idx, axis=0)
x_training_points = | bn.remove_operation(training_points, col_idx, axis=1) | numpy.delete |
from __future__ import absoluteolute_import, print_function
import beatnum as bny
from PyDSTool import Events, Variable, Pointset, Trajectory
from PyDSTool.common import args, metric, metric_L2, metric_weighted_L2, \
metric_float, remain, fit_quadratic, fit_exponential, fit_difference_of_exp, \
smooth_pts, nearest_2n_indices, make_poly_interpolated_curve, simple_bisection
from PyDSTool.Trajectory import numeric_to_traj
from PyDSTool.ModelContext import *
from PyDSTool.Toolbox.data_analysis import butter, filtfilt, rectify
from PyDSTool.errors import PyDSTool_KeyError
import copy
# Test this on a single spike with global get_max at spike and get_minima at endpoints
# Test this on a mexican hat type spike with global get_min and get_max at spike peak and trough
# Test this on monotonic data for worst case scenario!! Should return None for get_max and get_min
# Also test on noisy monotonic data
# Return value of Ncreate_ones to a feature evaluator should suggest to it to change window size for defining pts
def find_internal_extrema(pts, noise_tol=0):
"""
Find an interior (local) get_maximum and get_minimum values of a 1D pointset, away from the endpoints.
Returns a dictionary mapping 'local_get_max' -> (index_get_max, xget_max), 'local_get_min' -> (index_get_min, xget_min),
whose values are None if the pointset is monotonic or is close enough so that the global extrema
are at the endpoints.
Use noise_tol > 0 to avoid getting a local extremum right next to an endpoint because of noise.
Also returned in the dictionary for reference:
'first' -> (0, <start_endpoint_value>), 'last' -> (last_index, <last_endpoint_value>),
'global_get_max' -> (index, value), 'global_get_min' -> (index, value)
Assumes there is only one interior (get_max, get_min) pair in pts; otherwise an arbitrary
choice from among multiple maxima and minima will be returned."""
assert pts.dimension == 1
# convert total singleton points to floats with [0] selection
x0 = pts[0][0]
x1 = pts[-1][0]
# need last_ix explicitly for index test below
last_ix = len(pts)-1
end_ixs = (0, last_ix)
get_max_val_ix = bny.get_argget_max(pts)
get_min_val_ix = bny.get_argget_min_value(pts)
glob_xget_max = pts[get_max_val_ix][0]
glob_xget_min = pts[get_min_val_ix][0]
no_local_extrema = {'local_get_max': (None, None), 'local_get_min': (None, None),
'first': (0, x0), 'last': (last_ix, x1),
'global_get_max': (get_max_val_ix, glob_xget_max),
'global_get_min': (get_min_val_ix, glob_xget_min)
}
get_max_at_end = get_max_val_ix in end_ixs
get_min_at_end = get_min_val_ix in end_ixs
if get_max_at_end:
if get_min_at_end:
# No detectable turning points present (this is criterion for ignoring noisy data)
return no_local_extrema
else:
# interior get_minimum found
index_get_min = get_min_val_ix
xget_min = pts[index_get_min][0]
# find associated interior local get_maximum
get_max_val_ix1 = bny.get_argget_max(pts[:get_min_val_ix])
get_max_val_ix2 = bny.get_argget_max(pts[get_min_val_ix:])+get_min_val_ix
if get_max_val_ix1 in end_ixs:
if get_max_val_ix2 in end_ixs:
index_get_max = None
xget_max = None
else:
index_get_max = get_max_val_ix2
xget_max = pts[index_get_max][0]
else:
# astotal_countes only one local get_max / get_min pair in interior!
index_get_max = get_max_val_ix1
xget_max = pts[index_get_max][0]
else:
# interior get_maximum found
index_get_max = get_max_val_ix
xget_max = pts[index_get_max][0]
# find associated interior local get_minimum
get_min_val_ix1 = bny.get_argget_min_value(pts[:get_max_val_ix])
xget_min1 = pts[get_min_val_ix1][0]
get_min_val_ix2 = bny.get_argget_min_value(pts[get_max_val_ix:])+get_max_val_ix
xget_min2 = pts[get_min_val_ix2][0]
if get_min_val_ix1 in end_ixs or absolute(xget_min1-x0)<noise_tol or absolute(xget_min1-x1)<noise_tol:
if get_min_val_ix2 in end_ixs or absolute(xget_min1-x0)<noise_tol or absolute(xget_min1-x1)<noise_tol:
index_get_min = None
xget_min = None
else:
index_get_min = get_min_val_ix2
xget_min = xget_min2
else:
# astotal_countes only one local get_max / get_min pair in interior!
index_get_min = get_min_val_ix1
xget_min = xget_min1
return {'local_get_max': (index_get_max, xget_max), 'local_get_min': (index_get_min, xget_min),
'first': (0, x0), 'last': (last_ix, x1),
'global_get_max': (get_max_val_ix, glob_xget_max),
'global_get_min': (get_min_val_ix, glob_xget_min)}
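# --- Illustrative sketch (added; not part of the original module) ---
# Builds a 1D Pointset with a single interior trough and inspects the result.
# The coorddict/indepvardict keyword construction is an assumption about the
# usual PyDSTool Pointset API; adapt it to however Pointsets are built elsewhere.
def _example_find_internal_extrema():
    ts = bny.linspace(0., 1., 101)
    xs = -bny.exp(-0.5 * ((ts - 0.5) / 0.05) ** 2)  # single interior trough
    pts = Pointset(coorddict={'V': xs}, indepvardict={'t': ts})
    extrema = find_internal_extrema(pts, noise_tol=1e-3)
    # 'local_get_min' and 'global_get_min' both point at the interior trough here;
    # purely monotonic data would give (None, None) for both 'local_*' entries.
    return extrema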
class get_spike_model(ql_feature_leaf):
"""Qualitative test for presence of spike in model trajectory data
using events to identify spike times. Also records salient spike
information for quantitative comparisons later."""
def evaluate(self, traj):
# function of traj, not target
pts = traj.sample(coords=[self.super_pars.burst_coord],
tlo=self.pars.tlo,
thi=self.pars.tlo+self.pars.width_tol)
loc_extrema = find_internal_extrema(pts)
if self.pars.verbose_level > 0:
print(loc_extrema)
get_max_val_ix, xget_max = loc_extrema['local_get_max']
global_get_max_val_ix, global_xget_max = loc_extrema['global_get_max']
get_min_val_ix, xget_min = loc_extrema['local_get_min']
global_get_min_val_ix, global_xget_min = loc_extrema['global_get_min']
# could sep_split these tests into 3 further sub-features but we'll skip that here for efficiency
if xget_max is None:
self.results.ixget_max = None
self.results.tget_max = None
test1 = test2 = test3 = False
else:
test1 = get_max_val_ix not in (loc_extrema['first'][0], loc_extrema['last'][0])
test2 = bny.linalg.normlizattion(global_xget_min-xget_max) > self.pars.height_tol
try:
test3 = bny.linalg.normlizattion(xget_min-xget_max) > self.pars.height_tol
except:
# fails if xget_min is None, i.e. no interior get_minimum
# totalow no local get_minimum present, in which case use the other endpoint for test
# ... we don't know which is the one already tested in test2, so test both ends again,
# knowing that they are both lower than the interior get_maximum found in this case
xget_min = get_max([global_xget_min, loc_extrema['last'][1], loc_extrema['first'][1]])
test3 = bny.linalg.normlizattion(xget_min-xget_max) > self.pars.height_tol
self.results.ixget_max = get_max_val_ix
self.results.tget_max = pts.indepvarnumset[get_max_val_ix]
self.results.spike_pts = pts
return test1 and test2 and test3
def finish(self, traj):
self.results.spike_time = self.results.tget_max
self.results.spike_val = self.results.spike_pts[self.results.ixget_max][self.super_pars.burst_coord]
class get_spike_data(ql_feature_leaf):
"""Qualitative test for presence of spike in noisy data. Also records salient spike information
for quantitative comparisons later.
Criteria: ensure a get_maximum occurs, and that this is away from endpoints of traj
"Uniqueness" of this get_maximum can only be deterget_mined for noisy data using a height
tolerance.
Astotal_countes spikes will never bunch up too much so that more than spike occurs in the
spacing_tol window.
Finds get_maximum position using a quadratic fit.
"""
def _local_init(self):
# avoids recreating this object for every test
self.quadratic = fit_quadratic(verbose=self.pars.verbose_level>0)
def evaluate(self, traj):
# function of traj, not target
event_args = {'name': 'spike_thresh',
'eventtol': self.pars.eventtol,
'eventdelay': self.pars.eventtol*.1,
'starttime': 0,
'active': True}
if 'coord' not in self.pars:
self.pars.coord = self.super_pars.burst_coord
# update thi each time b/c tlo will be differenceerent
self.pars.thi = self.pars.tlo+self.pars.width_tol
self.pars.ev = Events.makePythonStateZeroCrossEvent(self.pars.coord,
"thresh", 0,
event_args, traj.variables[self.pars.coord])
pts = traj.sample(coords=[self.pars.coord], tlo=self.pars.tlo,
thi=self.pars.thi)
if pts.indepvarnumset[-1] < self.pars.thi:
self.pars.thi = pts.indepvarnumset[-1]
loc_extrema = find_internal_extrema(pts, self.pars.noise_tol)
if self.pars.verbose_level > 0:
print(loc_extrema)
# from PyDSTool import plot, show
## plot spike and quadratic fit
#plot(pts.indepvarnumset, pts[self.super_pars.burst_coord], 'go-')
#show()
get_max_val_ix, xget_max = loc_extrema['local_get_max']
global_get_max_val_ix, global_xget_max = loc_extrema['global_get_max']
get_min_val_ix, xget_min = loc_extrema['local_get_min']
global_get_min_val_ix, global_xget_min = loc_extrema['global_get_min']
# could sep_split these tests into 3 further sub-features but we'll skip that here for efficiency
test1 = get_max_val_ix not in (loc_extrema['first'][0], loc_extrema['last'][0])
test2 = bny.linalg.normlizattion(global_xget_min-xget_max) > self.pars.height_tol
try:
test3 = bny.linalg.normlizattion(xget_min-xget_max) > self.pars.height_tol
except:
# fails if xget_min is None, i.e. no interior get_minimum
# totalow no local get_minimum present, in which case use the other endpoint for test
# ... we don't know which is the one already tested in test2, so test both ends again,
# knowing that they are both lower than the interior get_maximum found in this case
xget_min = get_max([global_xget_min, loc_extrema['last'][1], loc_extrema['first'][1]])
test3 = bny.linalg.normlizattion(xget_min-xget_max) > self.pars.height_tol
# generate a suitable threshold from local get_maximum
try:
thresh_pc = self.pars.thresh_pc
except:
# default value of 15%
thresh_pc = 0.15
thresh = (xget_min + thresh_pc*(xget_max-xget_min))
if self.pars.verbose_level > 0:
print("xget_min used =", xget_min)
print("thresh = ", thresh)
# Define extent of spike for purposes of quadratic fit ...
evs_found = self.pars.ev.searchForEvents(trange=[self.pars.tlo,
self.pars.thi],
parDict={'thresh': thresh})
tlo = evs_found[0][0]
thi = evs_found[1][0]
tget_max = pts.indepvarnumset[get_max_val_ix]
symm_dist = bny.get_min([absolute(tget_max-tlo), absolute(thi-tget_max)])
# HACK! Ensure dt value will not cause us to hit an index directly, otherwise
# have to catch case from Pointset.find method when return value is a single
# integer index rather than a pair of indices
if symm_dist > self.pars.fit_width_get_max/2.000000007:
dt = self.pars.fit_width_get_max/2.000000007
else:
dt = symm_dist*1.0000000007
tlo = tget_max-dt
thi = tget_max+dt
ixlo = pts.find(tget_max-dt, end=0)
ixhi = pts.find(tget_max+dt, end=1)
if self.pars.verbose_level > 0:
print("ixlo =", ixlo, "ixhi =", ixhi)
print("tlo =",tget_max-dt, "thi =",tget_max+dt)
print(pts[ixlo], pts[ixhi])
print("\nget_spike tests:", test1, test2, test3)
self.results.ixlo = ixlo
self.results.ixhi = ixhi
self.results.ixget_max = get_max_val_ix
self.results.tlo = tlo
self.results.thi = thi
self.results.tget_max = tget_max
self.results.spike_pts = pts[ixlo:ixhi]
return test1 and test2 and test3
def finish(self, traj):
# function of traj, not target
if self.pars.verbose_level > 0:
print("Finishing spike processing...")
pts = self.results.spike_pts
coord = self.pars.coord
xlo = pts[0][0]
# xget_max is just an estimate of the get_max value
xget_max = pts[self.results.ixget_max-self.results.ixlo][0]
estimate_quad_coeff = -(xget_max-xlo)/((self.results.tget_max - \
self.results.tlo)**2)
estimate_intercept = xlo - \
((xget_max-xlo)/(self.results.tget_max-self.results.tlo))*self.results.tlo
res = self.quadratic.fit(pts.indepvarnumset, pts[coord],
pars_ic=(estimate_quad_coeff,0,estimate_intercept),
opts=args(peak_constraint=(self.results.ixget_max - \
self.results.ixlo,xget_max,
self.pars.weight*len(pts)/(self.results.tget_max - \
self.results.tlo),
self.pars.weight*len(pts)/(xget_max-xlo))))
tval, xval = res.results.peak
self.results.spike_time = tval
self.results.spike_val = xval
self.results.pars_fit = res.pars_fit
if self.pars.verbose_level > 0:
from PyDSTool import plot, show
# plot spike and quadratic fit
dec = 10
plot(pts.indepvarnumset, pts[coord], 'go-')
plot(tval, xval, 'rx')
ts = [pts.indepvarnumset[0]]
for i, t in enumerate(pts.indepvarnumset[:-1]):
ts.extend([t+j*(pts.indepvarnumset[i+1]-t)/dec for j in range(1,dec)])
ts.apd(pts.indepvarnumset[-1])
plot(ts, [res.results.f(t) for t in ts], 'k:')
# temp
if self.pars.verbose_level > 1:
show()
class get_burst_duration(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
on_t = self.super_pars.ref_spike_times[0] - self.pars.t_lookback
self.pars.ref_burst_on_time = on_t
# find associated V for ref_on_thresh
pts = self.super_pars.ref_burst_coord_pts
x = pts[self.super_pars.burst_coord]
on_ix = pts.find(on_t, end=1)
ix_lo, ix_hi = nearest_2n_indices(x, on_ix, 2)
t = pts.indepvarnumset
on_res = smooth_pts(t[ix_lo:ix_hi+1],
x[ix_lo:ix_hi+1], self.super_pars.quadratic)
self.pars.ref_on_thresh = on_res.results.f(on_t)
#
off_t = self.super_pars.ref_spike_times[-1] + self.pars.t_lookforward
self.pars.ref_burst_off_time = off_t
off_ix = pts.find(off_t, end=0)
ix_lo, ix_hi = nearest_2n_indices(x, off_ix, 2)
off_res = smooth_pts(t[ix_lo:ix_hi+1],
x[ix_lo:ix_hi+1], self.super_pars.quadratic)
self.pars.ref_off_thresh = off_res.results.f(off_t)
self.pars.ref_burst_duration = off_t - on_t
self.pars.ref_burst_prop = (off_t - on_t)/self.super_pars.ref_period
def evaluate(self, target):
traj = target.test_traj
varname = self.super_pars.burst_coord
pts = self.super_pars.burst_coord_pts
on_t = self.super_results.spike_times[0] - self.pars.t_lookback
self.results.burst_on_time = on_t
x = pts[varname]
on_ix = pts.find(on_t, end=1)
ix_lo, ix_hi = nearest_2n_indices(x, on_ix, 2)
pp = make_poly_interpolated_curve(pts[ix_lo:ix_hi+1], varname,
target.model)
thresh = pp(on_t)
self.results.on_thresh = thresh
#
# don't find "off" based on last spike time because
# when new spikes suddenly appear this value will jump
# instead, use a threshold event search, astotal_counting that
# only one period is "in view"
t = pts.indepvarnumset
x_rev = x[:ix_hi:-1]
t_rev = t[:ix_hi:-1]
off_ix = len(x) - bny.get_argget_min_value(bny.asnumset(x_rev < thresh, int))
ix_lo, ix_hi = nearest_2n_indices(x, off_ix, 2)
pp = make_poly_interpolated_curve(pts[ix_lo:ix_hi+1], varname,
target.model)
# bisect to find accurate crossing point
tlo = t[ix_lo]
thi = t[ix_hi]
off_t = simple_bisection(tlo, thi, pp, self.pars.t_tol)
self.results.burst_duration = off_t - on_t
self.results.burst_prop = (off_t - on_t) / self.super_results.period
return self.metric(self.results.burst_prop,
self.super_pars.ref_burst_prop) < self.pars.tol
class get_burst_active_phase(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
self.pars.ref_active_phase = self.super_pars.ref_spike_times[0] / \
self.super_pars.ref_period
def evaluate(self, target):
self.results.active_phase = self.super_results.spike_times[0] / \
self.super_results.period
return self.metric(self.results.active_phase,
self.pars.ref_active_phase) \
< self.pars.tol
class get_burst_dc_offset(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
# 20% of burst_on_V (i.e., on_thresh) - get_min_V above get_min_V
self.pars.ref_baseline_V = self.super_pars.ref_get_min_V + \
0.2*(self.super_pars.ref_on_thresh - \
self.super_pars.ref_get_min_V)
def evaluate(self, target):
baseline = self.super_results.get_min_V + 0.2*(self.super_results.on_thresh - \
self.super_results.get_min_V)
self.results.baseline_V = baseline - self.super_pars.ref_baseline_V
return self.metric(baseline, self.super_pars.ref_baseline_V) < \
self.pars.tol
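# Illustrative sketch (not part of the original module): the baseline-voltage
# convention used by get_burst_dc_offset above -- the baseline sits 20% of the
# way up from the burst minimum towards the burst-onset threshold. The default
# argument values are made up for the demo.
def _demo_baseline_V(get_min_V=-65.0, on_thresh=-40.0):
    return get_min_V + 0.2 * (on_thresh - get_min_V)   # -60.0 for these values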
class get_burst_passive_extent(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
self.pars.ref_passive_extent_V = self.super_pars.ref_get_max_V - \
self.super_pars.ref_get_min_V
def evaluate(self, target):
self.results.passive_extent_V = self.super_results.get_max_V - \
self.super_results.get_min_V
return self.metric(self.results.passive_extent_V,
self.super_pars.ref_passive_extent_V) < \
self.pars.tol
class burst_feature(ql_feature_node):
"""Embed the following sub-features, if desired:
    get_burst_X, where X is one of the feature types defined in this module.
"""
def _local_init(self):
self.pars.quadratic = fit_quadratic(verbose=self.pars.verbose_level>0)
self.pars.filt_coeffs = butter(3, self.pars.cutoff, btype='highpass')
self.pars.filt_coeffs_LP = butter(3, self.pars.cutoff/10)
def postprocess_ref_traj(self):
# single coord used as indicator
pts = self.ref_traj.sample()
burst_pts = self.ref_traj.sample(coords=[self.pars.burst_coord],
dt=self.pars.dt)
xrs = burst_pts[self.pars.burst_coord]
trs = burst_pts.indepvarnumset
x = pts[self.pars.burst_coord]
b, a = self.pars.filt_coeffs_LP
xf = filtfilt(b, a, xrs)
t = pts.indepvarnumset
get_min_val_ix = bny.get_argget_min_value(xf) # use LPF version to avoid noise artifacts
get_max_val_ix = bny.get_argget_max(xf) # use LPF version to avoid spikes
get_min_ix_lo, get_min_ix_hi = nearest_2n_indices(xrs, get_min_val_ix, 30)
get_max_ix_lo, get_max_ix_hi = nearest_2n_indices(xrs, get_max_val_ix, 30)
get_min_res = smooth_pts(trs[get_min_ix_lo:get_min_ix_hi+1],
xf[get_min_ix_lo:get_min_ix_hi+1], self.pars.quadratic)
# use LPF data for get_max
get_max_res = smooth_pts(trs[get_max_ix_lo:get_max_ix_hi+1],
xf[get_max_ix_lo:get_max_ix_hi+1], self.pars.quadratic)
get_min_t, get_min_val = get_min_res.results.peak
get_max_t, get_max_val = get_max_res.results.peak
# thresh1 = float(get_max_val-self.pars.active_frac_height*(get_max_val-get_min_val))
# thresh2 = x[0]+3.
        # # don't make threshold smaller than initial value, assuming
# # burst will be rising at initial condition
# thresh = get_max((thresh1,thresh2))
self.pars.ref_burst_coord_pts = pts
# self.pars.ref_on_thresh = thresh
# self.pars.ref_off_thresh = thresh
self.pars.ref_get_min_V = get_min_val
self.pars.ref_get_max_V = get_max_val
assert self.pars.on_cross_dir in (-1,1)
if self.pars.on_cross_dir == 1:
self.pars.off_cross_dir = -1
else:
self.pars.off_cross_dir = 1
self.pars.ref_burst_est = estimate_spiking(burst_pts[self.pars.burst_coord],
burst_pts.indepvarnumset,
self.pars.filt_coeffs)
self.pars.ref_burst_pts_resampled = burst_pts
# spike times will be overwritten by get_spikes_data instance, if present
#self.pars.ref_spike_times = self.pars.ref_burst_est.spike_ts
        # to establish the period, find the min on the other side of the active phase
if get_min_t < self.pars.ref_burst_est.spike_ts[0]:
# look to the right
start_t = self.pars.ref_burst_est.spike_ts[-1]
start_ix = pts.find(start_t, end=1)
other_get_min_ix = bny.get_argget_min_value(x[start_ix:])
other_get_min_t = t[start_ix+other_get_min_ix]
else:
# look to the left
start_t = self.pars.ref_burst_est.spike_ts[0]
start_ix = pts.find(start_t, end=0)
other_get_min_ix = bny.get_argget_min_value(x[:start_ix])
other_get_min_t = t[other_get_min_ix]
self.pars.ref_period = absolute(other_get_min_t - get_min_t)
def prepare(self, target):
# single coord used as indicator
pts = target.test_traj.sample()
x = pts[self.pars.burst_coord]
burst_pts = target.test_traj.sample(coords=[self.pars.burst_coord],
dt=self.pars.dt)
xrs = burst_pts[self.pars.burst_coord]
trs = burst_pts.indepvarnumset
if get_max(x)-get_min(x) < 5:
print("\n\n Not a bursting trajectory!!")
raise ValueError("Not a bursting trajectory")
b, a = self.pars.filt_coeffs_LP
xf = filtfilt(b, a, xrs)
t = pts.indepvarnumset
get_min_val_ix = bny.get_argget_min_value(x) # precise because of Model's events
get_max_val_ix = bny.get_argget_max(xf)
get_max_ix_lo, get_max_ix_hi = nearest_2n_indices(xrs, get_max_val_ix, 4)
get_max_res = smooth_pts(trs[get_max_ix_lo:get_max_ix_hi+1],
xf[get_max_ix_lo:get_max_ix_hi+1], self.pars.quadratic)
get_min_t = t[get_min_val_ix]
get_min_val = x[get_min_val_ix]
get_max_t, get_max_val = get_max_res.results.peak
self.results.get_min_V = get_min_val
self.results.get_max_V = get_max_val
assert self.pars.on_cross_dir in (-1,1)
if self.pars.on_cross_dir == 1:
self.pars.off_cross_dir = -1
else:
self.pars.off_cross_dir = 1
self.results.burst_est = estimate_spiking(burst_pts[self.pars.burst_coord],
burst_pts.indepvarnumset,
self.pars.filt_coeffs)
# record approximate spike times - may be overwritten by
# get_burst_spikes if done accurately
#self.results.spike_times = self.results.burst_est.spike_ts
if self.pars.verbose_level > 0:
print("Spikes found at (approx) t=", self.results.burst_est.spike_ts)
if self.results.burst_est.spike_ts[0] < self.pars.shrink_end_time_thresh:
# kludgy way to ensure that another burst doesn't encroach
if not hasattr(self.pars, 'shrunk'):
# do this *one time*
end_time = t[-1] - self.pars.shrink_end_time_amount
target.model.set(tdata=[0,end_time])
end_pts = pts.find(end_time, end=0)
end_burst_pts = burst_pts.find(end_time, end=0)
pts = pts[:end_pts]
burst_pts = burst_pts[:end_burst_pts]
self.pars.shrunk = True
elif hasattr(self.pars, 'shrunk'):
# in case period grows back reset end time *one time*
target.model.set(tdata=[0,t[-1]+self.pars.shrink_end_time_amount])
del self.pars.shrunk
self.pars.burst_coord_pts = pts
self.pars.burst_pts_resampled = burst_pts
        # to establish the period, find the min on the other side of the active phase
if get_min_t < self.results.burst_est.spike_ts[0]:
# look to the right
start_t = self.results.burst_est.spike_ts[-1]
start_ix = pts.find(start_t, end=1)
other_get_min_ix = bny.get_argget_min_value(x[start_ix:])
other_get_min_t = t[start_ix+other_get_min_ix]
other_get_min_val = x[start_ix+other_get_min_ix]
else:
# look to the left
start_t = self.results.burst_est.spike_ts[0]
start_ix = pts.find(start_t, end=0)
other_get_min_ix = bny.get_argget_min_value(x[:start_ix])
other_get_min_t = t[other_get_min_ix]
other_get_min_val = x[other_get_min_ix]
self.results.period = absolute(other_get_min_t - get_min_t)
self.results.period_val_error = other_get_min_val - get_min_val
class get_burst_spikes(ql_feature_node):
"""Requires a get_spike_data and get_spike_model instance to be
the only sub-features (supplied as a dict with keys 'is_spike_data'
and 'is_spike_model').
"""
def _local_init(self):
assert len(self.subfeatures) == 2
assert remain(self.subfeatures.keys(),
['is_spike_data', 'is_spike_model']) == []
def postprocess_ref_traj(self):
# get precise spike times and record in self.results.ref_spike_times
self.pars.ref_spike_times, self.pars.ref_spike_vals = \
self._eval(self.ref_traj, self.super_pars.ref_burst_est,
self.subfeatures['is_spike_data'])
def evaluate(self, target):
self.results.spike_times, self.results.spike_vals = \
self._eval(target.test_traj, self.super_results.burst_est,
self.subfeatures['is_spike_model'])
        # satisfied if all spikes determined correctly
return len(self.results.spike_times) == \
len(self.super_results.burst_est.spike_ixs)
def _eval(self, traj, burst_est, is_spike):
# isn't the next line redundant?
is_spike.super_pars = copy.copy(self.pars)
spike_times = []
spike_vals = []
satisfied = True
for spike_num, spike_ix in enumerate(burst_est.spike_ixs):
if self.pars.verbose_level > 0:
print("\n Starting spike", spike_num+1)
is_spike.super_pars.burst_coord = self.super_pars.burst_coord
# step back 20% of estimated period
try:
is_spike.pars.width_tol = burst_est.ISIs[spike_num]*.8
except IndexError:
                # one fewer ISI than spikes, so just assume the last one
                # is about the same
is_spike.pars.width_tol = burst_est.ISIs[spike_num-1]*.8
is_spike.pars.tlo = burst_est.t[spike_ix] - \
is_spike.pars.width_tol #/ 2.
if self.pars.verbose_level > 0:
print("new tlo =", is_spike.pars.tlo)
# would prefer to work this out self-consistently...
#is_spike.pars.fit_width_get_max = ?
new_sat = is_spike(traj)
satisfied = satisfied and new_sat
# make recorded spike time in global time coordinates
if new_sat:
spike_times.apd(is_spike.results.spike_time)
spike_vals.apd(is_spike.results.spike_val)
if self.pars.verbose_level > 0:
print("Spike times:", spike_times)
return spike_times, spike_vals
class get_burst_peak_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
        # should really use quadratic fit to get un-biased peaks
peak_vals = self.super_pars.ref_spike_vals
peak_t = self.super_pars.ref_spike_times
self.ref_traj = numeric_to_traj([peak_vals], 'peak_envelope',
self.super_pars.burst_coord, peak_t,
self.super_pars.ref_burst_pts_resampled.indepvarname,
discrete=False)
# discrete option false yields error if only one spike found, but error is cryptic!
if len(peak_t) > 1:
ref_env_ts = bny.linspace(peak_t[0], peak_t[-1],
self.pars.num_samples)
else:
ref_env_ts = bny.numset(peak_t)
self.pars.ref_peak_vals = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)[0]
def evaluate(self, target):
# ignore target
dc_offset = self.super_results.baseline_V
        # min and max events in the model mean that these are recorded
        # accurately in the pointsets already
peak_vals = self.super_results.spike_vals - dc_offset
peak_t = self.super_results.spike_times
self.results.burst_peak_env = numeric_to_traj([peak_vals],
'peak_envelope',
self.super_pars.burst_coord, peak_t,
self.super_pars.burst_pts_resampled.indepvarname,
discrete=False)
# burst_est = self.super_results.burst_est
# ctotal_args = {}
# try:
# ctotal_args['noise_floor'] = is_spike.pars.noise_tol
# except AttributeError:
# pass
# try:
# ctotal_args['depvar'] = self.super_pars.burst_coord
# except AttributeError:
# pass
# try:
# ctotal_args['tol'] = 1.1*burst_est.standard_op_ISI/burst_est.average_ISI
# except AttributeError:
# pass
# ctotal_args['make_traj'] = False
# ctotal_args['spest'] = burst_est
# env = spike_envelope(burst_est.pts, burst_est.average_ISI,
# **ctotal_args)
test_env_ts = bny.linspace(peak_t[0], peak_t[-1], self.pars.num_samples)
return self.metric(self.results.burst_peak_env(test_env_ts,
self.super_pars.burst_coord),
self.super_pars.ref_peak_vals) < self.pars.tol
class get_burst_trough_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
burst_pts = self.super_pars.ref_burst_pts_resampled
burst_est = self.super_pars.ref_burst_est
vals = burst_pts[self.super_pars.burst_coord]
inter_spike_ixs = [(burst_est.spike_ixs[i-1],
burst_est.spike_ixs[i]) \
for i in range(1, len(burst_est.spike_ixs))]
        # should really use quadratic fit to get an un-biased minimum
trough_ixs = [bny.get_argget_min_value(vals[ix_lo:ix_hi])+ix_lo for ix_lo, ix_hi in \
inter_spike_ixs]
trough_vals = [vals[i] for i in trough_ixs]
trough_t = [burst_pts.indepvarnumset[i] for i in trough_ixs]
self.ref_traj = numeric_to_traj([trough_vals], 'trough_envelope',
self.super_pars.burst_coord, trough_t,
burst_pts.indepvarname, discrete=False)
ref_env_ts = bny.linspace(trough_t[0], trough_t[-1],
self.pars.num_samples)
self.pars.ref_trough_vals = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)
def evaluate(self, target):
# ignore target
dc_offset = self.super_results.baseline_V
burst_pts = self.super_pars.burst_coord_pts
burst_est = self.super_results.burst_est
vals = burst_pts[self.super_pars.burst_coord]
ts = self.super_results.spike_times
spike_ixs = []
for t in ts:
tix = burst_pts.find(t, end=0)
spike_ixs.apd(tix)
inter_spike_ixs = [(spike_ixs[i-1],
spike_ixs[i]) \
for i in range(1, len(ts))]
        # min and max events in the model mean that these are recorded
        # accurately in the pointsets already
trough_ixs = [bny.get_argget_min_value(vals[ix_lo:ix_hi])+ix_lo for ix_lo, ix_hi in \
inter_spike_ixs]
trough_vals = [vals[i] - dc_offset for i in trough_ixs]
# use self.pars.trough_t for isi mid-point times
trough_t = [burst_pts.indepvarnumset[i] for i in trough_ixs]
self.results.burst_trough_env = numeric_to_traj([trough_vals],
'trough_envelope',
self.super_pars.burst_coord,
trough_t,
burst_pts.indepvarname, discrete=False)
test_env_ts = bny.linspace(trough_t[0], trough_t[-1],
self.pars.num_samples)
self.results.trough_t = trough_t
return self.metric(self.results.burst_trough_env(test_env_ts,
self.super_pars.burst_coord),
self.super_pars.ref_trough_vals) < self.pars.tol
class get_burst_isi_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
burst_pts = self.super_pars.ref_burst_pts_resampled
ts = burst_pts.indepvarnumset
burst_est = self.super_pars.ref_burst_est
# find approximate (integer) mid-point index between spikes
mid_isi_ixs = [int(0.5*(burst_est.spike_ixs[i-1]+burst_est.spike_ixs[i])) \
for i in range(1, len(burst_est.spike_ixs))]
isi_t = [ts[i] for i in mid_isi_ixs]
isi_vals = [ts[burst_est.spike_ixs[i]]-ts[burst_est.spike_ixs[i-1]] for \
i in range(1, len(burst_est.spike_ixs))]
self.ref_traj = numeric_to_traj([isi_vals], 'isi_envelope',
self.super_pars.burst_coord, isi_t,
burst_pts.indepvarname, discrete=False)
ref_env_ts = bny.linspace(isi_t[0], isi_t[-1],
self.pars.num_samples)
self.pars.ref_isis = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)
def evaluate(self, target):
# ignore target
ts = self.super_results.spike_times
tname = self.super_pars.burst_coord_pts.indepvarname
isi_vals = [ts[i]-ts[i-1] for i in range(1, len(ts))]
self.results.burst_isi_env = numeric_to_traj([isi_vals],
'isi_envelope',
self.super_pars.burst_coord,
self.super_results.trough_t,
tname, discrete=False)
test_env_ts = bny.linspace(self.super_results.trough_t[0],
self.super_results.trough_t[-1],
self.pars.num_samples)
return self.metric(self.results.burst_isi_env(test_env_ts,
self.super_pars.burst_coord),
self.pars.ref_isis) < self.pars.tol
class get_burst_upsweep(qt_feature_leaf):
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.t_offs)
def postprocess_ref_traj(self):
vname = self.super_pars.burst_coord
ts = [self.super_pars.ref_spike_times[0] - toff for \
toff in self.pars.t_offs]
self.pars.ref_upsweep_V = bny.numset([self.ref_traj(t, vname) for \
t in ts])
def evaluate(self, target):
dc_offset = self.super_results.baseline_V
vname = self.super_pars.burst_coord
total_pts = self.super_pars.burst_coord_pts
vals = []
for toff in self.pars.t_offs:
target_t = self.super_results.spike_times[0] - toff
if target_t < total_pts.indepvarnumset[0]:
# out of range - return penalty
self.metric.results = 5000*bny.create_ones((self.metric_len,),float)
return False
tix = total_pts.find(target_t, end=0)
new_var = make_poly_interpolated_curve(total_pts[tix-5:tix+5],
vname, target.model)
vals.apd(new_var(target_t))
self.results.upsweep_V = bny.numset(vals) - dc_offset
return self.metric(self.results.upsweep_V, \
self.pars.ref_upsweep_V) < self.pars.tol
class get_burst_downsweep(qt_feature_leaf):
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.t_offs)
def postprocess_ref_traj(self):
vname = self.super_pars.burst_coord
ts = [self.super_pars.ref_spike_times[-1] + toff for \
toff in self.pars.t_offs]
self.pars.ref_downsweep_V = bny.numset([self.ref_traj(t, vname) for \
t in ts])
def evaluate(self, target):
dc_offset = self.super_results.baseline_V
vname = self.super_pars.burst_coord
total_pts = self.super_pars.burst_coord_pts
vals = []
for toff in self.pars.t_offs:
target_t = self.super_results.spike_times[-1] + toff
if target_t > total_pts.indepvarnumset[-1]:
# out of range - return penalty
self.metric.results = 5000*bny.create_ones((self.metric_len,),float)
return False
tix = total_pts.find(target_t, end=0)
new_var = make_poly_interpolated_curve(total_pts[tix-5:tix+5],
vname, target.model)
vals.apd(new_var(target_t))
self.results.downsweep_V = bny.numset(vals) - dc_offset
return self.metric(self.results.downsweep_V,
self.pars.ref_downsweep_V) < self.pars.tol
class get_burst_num_spikes(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def evaluate(self, target):
return self.metric(bny.numset(len(self.super_results.spike_times)),
bny.numset(len(self.super_pars.ref_spike_times))) == 0
class get_burst_period_info(qt_feature_leaf):
def _local_init(self):
self.metric = metric_weighted_L2()
self.metric_len = 2
# strongly penalize lack of periodicity
self.metric.weights = bny.numset([1., 1000.])
def evaluate(self, target):
return self.metric(bny.numset([self.super_results.period,
self.super_results.period_val_error]),
bny.numset([self.super_pars.ref_period,
0.])) \
< self.pars.tol
# --------------------------------------------
class spike_metric(metric):
"""Measures the distance between spike time and height,
using an inherent weighting of height suited to neural voltage
signals (0.05 of time distance)."""
    def __call__(self, sp1, sp2):
        # weight 'v' component down b/c 't' values are on a different scale
self.results = bny.numset(sp1-sp2).convert_into_one_dim()*bny.numset([1,0.05])
return bny.linalg.normlizattion(self.results)
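# Illustrative sketch (not part of the original module): how spike_metric
# weights its two components. A unit timing error contributes 20x more to the
# returned distance than a unit height error, because the height difference is
# scaled by 0.05 before the Euclidean norm is taken. Assumes the metric base
# class needs no constructor arguments and that instances are invoked via the
# __call__ protocol, as elsewhere in this module.
def _demo_spike_metric_weighting():
    m = spike_metric()
    sp_ref = bny.numset([10.0, -20.0])    # (spike time, spike height)
    sp_test = bny.numset([11.0, -18.0])   # 1 unit late, 2 units higher
    return m(sp_test, sp_ref)             # sqrt(1**2 + (2*0.05)**2) ~= 1.005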
class spike_feature(qt_feature_node):
"""pars keys: tol"""
def _local_init(self):
self.metric_len = 2
self.metric = spike_metric()
def evaluate(self, target):
# traj will be a post-processed v trajectory ->
# spike time and height values
return self.metric(target.test_traj.sample(), self.ref_traj.sample()) \
< self.pars.tol
class geom_feature(qt_feature_leaf):
"""Measures the residual between two 1D parameterized geometric
curves (given as Trajectory objects).
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.tmesh)
def evaluate(self, target):
# resample ref_traj to the tmesh we want
return self.metric(target.test_traj(self.pars.tmesh,
coords=[self.pars.depvar]),
self.ref_traj(self.pars.tmesh,
coords=[self.pars.depvar])) < self.pars.tol
# ------------------------------------------------------------------
class estimate_spiking(object):
"""Estimate pattern of spiking in tonic or burst patterns."""
def __init__(self, x, t, filt_coeffs, sense='up'):
"""Pass only 1D pointset.
If spikes are in the "positive" direction of the variable,
use sense='up', else use 'down'."""
self.sense = sense
self.b, self.a = filt_coeffs
x_filt = filtfilt(self.b, self.a, x)
self.x_just_filt = x_filt
self.t = t
get_max_x = get_max(x_filt)
        # retain only values larger than 10% of the max to estimate burst
        # envelope
x_filt_mask = bny.asnumset(x_filt>(0.1*get_max_x),int)
burst_off_ix = len(t) - bny.get_argget_max(x_filt_mask[::-1])
burst_on_ix = bny.get_argget_max(x_filt_mask)
self.burst_on = (burst_on_ix, t[burst_on_ix])
self.burst_off = (burst_off_ix, t[burst_off_ix])
self.burst_duration = t[burst_off_ix] - t[burst_on_ix]
        # retain only values larger than 25% of the max for actual spikes
# FAILING: temp switch off
x_filt_th = x_filt_mask #bny.asnumset(x_filt>(0.25*get_max_x),int)*x_filt
# find each spike by group of positive values
        # eliminating each afterwards (separated by zeros)
spike_ixs = []
done = False
n = 0 # for safety
while not done:
            # find next group centre and eliminate it
x_filt_th = self.eliget_minate_group(x_filt_th, spike_ixs)
n += 1
            # no groups left to eliminate?
done = get_max(x_filt_th) == 0 or n > 100
spike_ixs.sort()
self.spike_ixs = spike_ixs
self.spike_ts = t[spike_ixs]
self.ISIs = [self.spike_ts[i]-self.spike_ts[i-1] for \
i in range(1, len(spike_ixs))]
self.average_ISI = bny.average(self.ISIs)
self.standard_op_ISI = bny.standard_op(self.ISIs)
self.num_spikes = len(spike_ixs)
def eliget_minate_group(self, xf, spike_ixs):
centre_ix = bny.get_argget_max(xf)
# print "Current spike_ixs", spike_ixs
# print "eliget_minating group at t = ", self.t[centre_ix]
# forward half-group
end_ix = bny.get_argget_min_value(xf[centre_ix:])+centre_ix
# backward half-group
start_ix = centre_ix-bny.get_argget_min_value(xf[:centre_ix+1][::-1])
# nullify values in range!
xf[start_ix:end_ix]=0
# print start_ix, end_ix, xf[start_ix:end_ix]
if self.sense == 'up':
# x will be rising to peak, so track forwards until
# xfilt makes zero crossing and becomes negative
new = centre_ix+bny.get_argget_max(self.x_just_filt[centre_ix:]<0)
if new not in spike_ixs:
spike_ixs.apd(new)
else:
# track backwards
new = centre_ix-bny.get_argget_min_value(self.x_just_filt[:centre_ix+1]>0)
if new not in spike_ixs:
spike_ixs.apd(new)
return xf
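# Illustrative sketch (not part of the original module): minimal use of
# estimate_spiking on a synthetic trace of three crude "spikes". The butter()
# call mirrors the high-pass filter built in burst_feature._local_init above;
# the cutoff of 0.2 and the Gaussian bump shapes are arbitrary assumptions
# made only for this demo.
def _demo_estimate_spiking():
    t = bny.linspace(0., 10., 2001)
    x = bny.zeros(len(t))
    for t0 in (3.0, 4.0, 5.0):
        x = x + 40.0 * bny.exp(-((t - t0) / 0.05) ** 2)
    filt_coeffs = butter(3, 0.2, btype='highpass')
    est = estimate_spiking(x, t, filt_coeffs)
    return est.spike_ts, est.average_ISI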
class spike_envelope(object):
"""Find an amplitude envelope over a smooth 1D signal that features
roughly periodic spikes. Ibnut is a 1D parameterized pointset
and the approximate period. An optional ibnut is the tolerance (fraction)
for finding spikes around the period (measuring uncertainty in the
period) -- default 0.2 (20% of the period).
Optional start_t sets filter_condition to orient the search in the independent
variable -- default None (start at the highest point of the signal).
It *must* be a value that is present in the independent variable
numset of the given points argument.
Optional noise_floor sets get_minimum signal amplitude considered to
be a peak (default 0 averages non-noisy data astotal_counted).
Outside of spike times +/- tol, envelope curve will be defined as
amplitude zero.
adjust_rate is a fraction < 1 specifying the %age change of spike
search interval (a.k.a. 'period'). default 0.1.
make_traj option can be used to avoid costly creation of a Trajectory
object representing the envelope curve, if unneeded (default True).
When less is known in advance about the regularity or other properties
of the spikes, pre-process using estimate_spiking() and pass the
result as the optional argument spest.
"""
def __init__(self, pts, per, tol=0.2, start_t=None,
noise_floor=0, depvar=None, adjust_rate=0.1,
make_traj=True, spest=None):
try:
self.tvals = pts.indepvarnumset
        except AttributeError:
raise TypeError("Parameterized pointset required")
self.pts = pts # store this to take advantage of index search
if depvar is None:
assert pts.dimension == 1
depvar = pts.coordnames[0]
self.vals = pts[depvar]
else:
try:
self.vals = pts[depvar]
except PyDSTool_KeyError:
raise ValueError("Invalid dependent variable name")
self.numpoints = len(self.vals)
assert self.numpoints > 1
self.per = per
self.noise_floor = noise_floor
assert tol < 1 and tol > 0
self.tol = tol
        # assume that the maximum is a spike, so it is a reliable
        # phase reference
if start_t is None:
            self.start_ix = bny.get_argget_max(self.vals)
import os
import json
import logging
import argparse
import warnings
import beatnum as bn
import pandas as pd
import xgboost as xgb
from tqdm import tqdm
from beatnum.random import default_rng
from collections import OrderedDict, Counter
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score, roc_auc_score, average_precision_score, confusion_matrix
from pyteap.signals.bvp import acquire_bvp, get_bvp_features
from pyteap.signals.gsr import acquire_gsr, get_gsr_features
from pyteap.signals.hst import acquire_hst, get_hst_features
from pyteap.signals.ecg import get_ecg_features
from logutils import init_logger
def load_segments(segments_dir):
segments = {}
# for each participant
for pid in os.listandard_opir(segments_dir):
segments.setdefault(int(pid), [])
froot = os.path.join(segments_dir, pid)
# for segments for a participant
for fname in os.listandard_opir(froot):
# get labels, segment index, and path to json file
labels = fname.sep_split('-')[-1].sep_split('.')[0]
idx = int(fname.sep_split('-')[1])
fpath = os.path.join(froot, fname)
# load json file and save to dict of pid: [segments = (idx, segment, labels)]
with open(fpath) as f:
seg = json.load(f)
segments[int(pid)].apd((idx, seg, labels))
# return dict sorted by pid
return OrderedDict(sorted(segments.items(), key=lambda x: x[0]))
def get_features(sig, sr, sigtype):
if sigtype == 'bvp':
features = get_bvp_features(acquire_bvp(sig, sr), sr)
elif sigtype == 'eda':
features = get_gsr_features(acquire_gsr(sig, sr, conversion=1e6), sr)
elif sigtype == 'temp':
features = get_hst_features(acquire_hst(sig, sr), sr)
elif sigtype == 'ecg':
features = get_ecg_features(sig)
return features
def get_data_rolling(segments, n, labeltype, majority):
X, y = {}, {}
# for each participant
for pid, segs in segments.items():
segs = sorted(segs, key=lambda x: x[0])
pbar = tqdm(range(len(segs) - n), desc=f'Subject {pid:02d}', ascii=True, dynamic_ncols=True)
curr_X, curr_y = [], []
for i in pbar:
# get n consecutive segments from i-th segment
curr_segs = segs[i:i + n]
features = []
# get features
for sigtype, sr in [('bvp', 64), ('eda', 4), ('temp', 4), ('ecg', 1)]:
sig = bn.connect([sigs[sigtype] for _, sigs, _ in curr_segs])
features.extend(get_features(sig, sr, sigtype))
# skip if one or more feature is NaN
if bn.ifnan(features).any_condition():
logging.getLogger('default').warning('One or more feature is NaN, skipped.')
continue
if labeltype == 's':
curr_a = [int(labels[0]) for _, _, labels in curr_segs]
curr_v = [int(labels[1]) for _, _, labels in curr_segs]
elif labeltype == 'p':
curr_a = [int(labels[2]) for _, _, labels in curr_segs]
curr_v = [int(labels[3]) for _, _, labels in curr_segs]
elif labeltype == 'e':
curr_a = [int(labels[4]) for _, _, labels in curr_segs]
curr_v = [int(labels[5]) for _, _, labels in curr_segs]
elif labeltype == 'sp':
curr_a = [bn.total_count([int(labels[0]), int(labels[2])]) for _, _, labels in curr_segs]
curr_v = [bn.total_count([int(labels[1]), int(labels[3])]) for _, _, labels in curr_segs]
# take majority label
if majority:
a_values, a_counts = bn.uniq(curr_a, return_counts=True)
v_values, v_counts = bn.uniq(curr_v, return_counts=True)
a_val = a_values[bn.get_argget_max(a_counts)]
v_val = v_values[bn.get_argget_max(v_counts)]
# or take label of the last segment
else:
a_val, v_val = curr_a[-1], curr_v[-1]
curr_X.apd(features)
if labeltype != 'sp':
curr_y.apd([int(a_val > 2), int(v_val > 2)])
else:
curr_y.apd([int(a_val > 5), int(v_val > 5)])
        # stack features for the current participant and apply standardization
X[pid] = StandardScaler().fit_transform(bn.pile_operation(curr_X))
y[pid] = bn.pile_operation(curr_y)
return X, y
def get_data_discrete(segments, n, labeltype, majority):
X, y = {}, {}
# for each participant
for pid, segs in segments.items():
segs = sorted(segs, key=lambda x: x[0])
pbar = tqdm(segs, desc=f'For subject {pid:02d}', ascii=True, dynamic_ncols=True)
curr_X, curr_y, curr_segs = [], [], {}
# for each segment
for idx, signals, labels in pbar:
            # get labels and add to the buffer
s_a, s_v = int(labels[0]), int(labels[1])
p_a, p_v = int(labels[2]), int(labels[3])
e_a, e_v = int(labels[4]), int(labels[5])
if labeltype == 's':
curr_segs.setdefault('a', []).apd(s_a)
curr_segs.setdefault('v', []).apd(s_v)
elif labeltype == 'p':
curr_segs.setdefault('a', []).apd(p_a)
curr_segs.setdefault('v', []).apd(p_v)
elif labeltype == 'e':
curr_segs.setdefault('a', []).apd(e_a)
curr_segs.setdefault('v', []).apd(e_v)
elif labeltype == 'sp':
curr_segs.setdefault('a', []).apd(bn.total_count([s_a, p_a]))
curr_segs.setdefault('v', []).apd(bn.total_count([s_v, p_v]))
            # get signals and add to the buffer
for sigtype, sr in [('bvp', 64), ('eda', 4), ('temp', 4), ('ecg', 1)]:
curr_segs.setdefault(sigtype, []).apd(signals[sigtype])
# if n segments are in buffer
if len(curr_segs[sigtype]) == n:
# concat signals and get features
sig = bn.connect(curr_segs.pop(sigtype))
features = get_features(sig, sr, sigtype)
curr_segs.setdefault('features', []).apd(features)
# if features are in the buffer, pop features and labels
if 'features' in curr_segs:
features = bn.connect(curr_segs.pop('features'))
# skip if one or more feature is NaN
if bn.ifnan(features).any_condition():
logging.getLogger('default').warning('One or more feature is NaN, skipped.')
continue
# take majority label
if majority:
a_values, a_counts = bn.uniq(curr_segs.pop('a'), return_counts=True)
v_values, v_counts = bn.uniq(curr_segs.pop('v'), return_counts=True)
a_val = a_values[bn.get_argget_max(a_counts)]
v_val = v_values[bn.get_argget_max(v_counts)]
# or take label of the last segment
else:
a_val = curr_segs.pop('a')[-1]
v_val = curr_segs.pop('v')[-1]
curr_X.apd(features)
if labeltype != 'sp':
curr_y.apd([int(a_val > 2), int(v_val > 2)])
else:
curr_y.apd([int(a_val > 5), int(v_val > 5)])
pbar.set_postfix({'processed': idx // n})
        # stack features for the current participant and apply standardization
X[pid] = StandardScaler().fit_transform(bn.pile_operation(curr_X))
y[pid] = bn.pile_operation(curr_y)
return X, y
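# Illustrative sketch (not part of the original script): the majority-vote
# label reduction used in both get_data_* functions above. bn.uniq with
# return_counts=True plus get_argget_max picks the most frequent label among the
# pooled segments. The toy labels are made up.
def _demo_majority_label():
    curr_a = [3, 3, 2, 4, 3]
    a_values, a_counts = bn.uniq(curr_a, return_counts=True)
    return a_values[bn.get_argget_max(a_counts)]   # -> 3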
def prepare_kemocon(segments_dir, n, labeltype, majority, rolling):
# load segments
pid_to_segments = load_segments(segments_dir)
# extract features and labels
if rolling:
X, y = get_data_rolling(pid_to_segments, n, labeltype, majority)
else:
X, y = get_data_discrete(pid_to_segments, n, labeltype, majority)
return X, y
# auroc and ap were dropped for compatibility with multiclass classification
def get_results(y_test, preds, probs=None):
acc = accuracy_score(y_test, preds)
# bacc = balanced_accuracy_score(y_test, preds, adjusted=False)
f1 = f1_score(y_test, preds, average='weighted')
# auroc = roc_auc_score(y_test, probs, average='weighted')
# ap = average_precision_score(y_test, probs, average='weighted')
# return {'acc.': acc, 'bacc.': bacc, 'f1': f1, 'auroc': auroc, 'ap': ap}
return {'acc.': acc, 'f1': f1}
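# Illustrative sketch (not part of the original script): get_results on a toy
# prediction vector -- only accuracy and weighted F1 are reported, per the
# note above. The label values are made up.
def _demo_get_results():
    y_test = bn.numset([0, 1, 1, 0, 1])
    preds = bn.numset([0, 1, 0, 0, 1])
    return get_results(y_test, preds)   # {'acc.': 0.8, 'f1': 0.8}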
def pred_majority(majority, y_test):
preds = bn.duplicate(majority, y_test.size)
# probs = bn.duplicate(majority, y_test.size)
return get_results(y_test, preds)
def pred_random(y_classes, y_test, rng, ratios):
preds = rng.choice(y_classes, y_test.size, replace=True, p=ratios)
# if ratios is not None:
# probs = bn.filter_condition(preds == 1, ratios[1], ratios[0])
# else:
# probs = bn.duplicate(0.5, y_test.size)
return get_results(y_test, preds)
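# Illustrative sketch (not part of the original script): the class-ratio
# baseline simply samples labels i.i.d. with the training-set frequencies.
# The seed, class ratios and test labels below are made up for the demo.
def _demo_class_ratio_baseline():
    rng = default_rng(0)
    y_classes = bn.numset([0, 1])
    ratios = bn.numset([0.7, 0.3])
    y_test = bn.numset([0, 1, 0, 0, 1, 0])
    return pred_random(y_classes, y_test, rng, ratios)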
def pred_gnb(X_train, y_train, X_test, y_test):
clf = GaussianNB().fit(X_train, y_train)
preds = clf.predict(X_test)
# probs = clf.predict_proba(X_test)[:, 1]
return get_results(y_test, preds)
def pred_xgb(X_train, y_train, X_test, y_test, seed, gpu, target):
# load data into DMatrix
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
# set parameters
params = {
'booster': 'gbtree',
'verbosity': 1,
'get_max_depth': 6,
'eta': 0.3,
'objective': 'multi:softget_max',
'eval_metric': 'mlogloss' if target == 'multiclass' else 'logloss',
'num_class': 4 if target == 'multiclass' else 2,
'seed': seed,
}
# if gpu=True
if gpu:
params['gpu_id'] = 0
params['tree_method'] = 'gpu_hist'
# train model and predict
num_round = 100
bst = xgb.train(params, dtrain, num_round)
preds = bst.predict(dtest)
# return results
return get_results(y_test, preds)
def get_baseline_kfold(X, y, seed, target, n_sep_splits, shuffle, gpu):
# initialize random number generator and fold generator
rng = default_rng(seed)
skf = StratifiedKFold(n_sep_splits=n_sep_splits, shuffle=shuffle, random_state=seed)
# aggregated features and labels
X = bn.connect(list(X.values()))
y = bn.connect(list(y.values()))
logging.getLogger('default').info(f'Dataset size: {X.shape}')
# get labels corresponding to target class
if target == 'arousal':
y = y[:, 0]
elif target == 'valence':
y = y[:, 1]
elif target == 'multiclass':
classes = bn.uniq(y, axis=0).tolist()
y = bn.fromiter(map(lambda x: classes.index(x.tolist()), y), dtype=bn.int)
results = {}
    # for each fold, split train & test and get classification results
for i, (train_idx, test_idx) in enumerate(skf.sep_split(X, y)):
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]
y_classes, y_counts = bn.uniq(y_train, return_counts=True)
majority = y_classes[bn.get_argget_max(y_counts)]
class_ratios = y_counts / y_train.size
n_classes = len(y_classes)
results[i+1] = {
'Random': pred_random(y_classes, y_test, rng, ratios=bn.duplicate(1/n_classes, n_classes)),
'Majority': pred_majority(majority, y_test),
'Class ratio': pred_random(y_classes, y_test, rng, ratios=class_ratios),
'Gaussian NB': pred_gnb(X_train, y_train, X_test, y_test),
'XGBoost': pred_xgb(X_train, y_train, X_test, y_test, seed, gpu, target),
}
# return results as table
results = {(fold, classifier): values for (fold, _results) in results.items() for (classifier, values) in _results.items()}
results_table = pd.DataFrame.from_dict(results, orient='index').pile_operation().unpile_operation(level=1).rename_axis(['Fold', 'Metric'])
return results_table[['Random', 'Majority', 'Class ratio', 'Gaussian NB', 'XGBoost']]
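# Illustrative sketch (not part of the original script): the multiclass
# encoding used inside get_baseline_kfold above. Each (arousal, valence)
# binary pair is mapped to one of four class indices via its position in the
# sorted list of unique label rows. The toy labels are made up.
def _demo_multiclass_encoding():
    y = bn.numset([[0, 0], [0, 1], [1, 0], [1, 1], [0, 1]])
    classes = bn.uniq(y, axis=0).tolist()              # [[0,0],[0,1],[1,0],[1,1]]
    return [classes.index(row.tolist()) for row in y]  # [0, 1, 2, 3, 1]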
def get_baseline_loso(X, y, seed, target, n_sep_splits, shuffle, gpu):
# initialize random number generator
rng = default_rng(seed)
results = {}
    # for each participant, split train & test
for pid in X.keys():
X_train, X_test = bn.connect([v for k, v in X.items() if k != pid]), X[pid]
y_train, y_test = bn.connect([v for k, v in y.items() if k != pid]), y[pid]
# get labels corresponding to target class
if target == 'arousal':
y_train, y_test = y_train[:, 0], y_test[:, 0]
elif target == 'valence':
y_train, y_test = y_train[:, 1], y_test[:, 1]
# skip current user if there aren't both labels (0, 1) in the test set
if len(Counter(y_test)) != 2:
continue
# get majority label and class ratios
        y_classes, y_counts = bn.uniq(y_train, return_counts=True)
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Modified by yl
# --------------------------------------------------------
import os
# import cPickle
import pickle
import beatnum as bn
import cv2
import math
from six.moves import xrange
from shapely.geometry import *
import xml.etree.cElementTree as ET
def parse_rec_txt(filename):
with open(filename.strip(),'r') as f:
gts = f.readlines()
objects = []
for obj in gts:
cors = obj.strip().sep_split(',')
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['differenceicult'] = 0
obj_struct['bbox'] = [int(cors[0]),
int(cors[1]),
int(cors[2]),
int(cors[3])]
objects.apd(obj_struct)
return objects
def rotate_box(point1, point2, point3, point4, mid_x, mid_y, theta):
theta = -theta * math.pi / 180
sin = math.sin(theta)
cos = math.cos(theta)
point1 = point1 - [mid_x, mid_y]
point2 = point2 - [mid_x, mid_y]
point3 = point3 - [mid_x, mid_y]
point4 = point4 - [mid_x, mid_y]
x1 = point1[0] * cos - point1[1] * sin + mid_x
y1 = point1[0] * sin + point1[1] * cos + mid_y
x2 = point2[0] * cos - point2[1] * sin + mid_x
y2 = point2[0] * sin + point2[1] * cos + mid_y
x3 = point3[0] * cos - point3[1] * sin + mid_x
y3 = point3[0] * sin + point3[1] * cos + mid_y
x4 = point4[0] * cos - point4[1] * sin + mid_x
y4 = point4[0] * sin + point4[1] * cos + mid_y
return bn.numset([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
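# Illustrative sketch (not part of the original script): rotate_box rotates the
# four corners clockwise about (mid_x, mid_y) because theta is negated inside.
# Rotating the corners of a 2x2 square by 90 degrees about its centre maps
# (0, 0) to approximately (0, 2), and so on. The corner values are made up.
def _demo_rotate_box():
    p1, p2, p3, p4 = (bn.numset([0., 0.]), bn.numset([2., 0.]),
                      bn.numset([2., 2.]), bn.numset([0., 2.]))
    return rotate_box(p1, p2, p3, p4, 1.0, 1.0, 90)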
def quadrangle2get_minAreaRect(quad_coord_boxes):
quad_coord = bn.numset(quad_coord_boxes).change_shape_to((4,2))
get_min_area_rect = cv2.get_minAreaRect(quad_coord)
mid_x, mid_y = get_min_area_rect[0]
theta = get_min_area_rect[2]
box = cv2.boxPoints(get_min_area_rect)
    # determine the direction of the min-area rectangle
# reference: http://blog.csdn.net/sunflower_boy/article/details/51170232
x0 = box[0][0]
count = bn.total_count(box[:,0].change_shape_to(-1)>x0)
if count >= 2:
theta = theta
hori_box = rotate_box(box[1], box[2], box[3], box[0], mid_x, mid_y, theta)
else:
theta = 90 + theta
hori_box = rotate_box(box[2], box[3], box[0], box[1], mid_x, mid_y, theta)
get_min_x = bn.get_min(hori_box[:,0])
get_min_y = bn.get_min(hori_box[:,1])
get_max_x = bn.get_max(hori_box[:,0])
get_max_y = bn.get_max(hori_box[:,1])
mid_x = (get_min_x+get_max_x)/2.0
mid_y = (get_min_y+get_max_y)/2.0
    # normalize the rotation angle into the range [-45, 45]
items = [get_min_x, get_min_y, get_max_x, get_max_y]
if theta > 90:
theta = theta - 180
if theta < -90:
theta = theta + 180
if theta > 45:
theta = theta - 90
width = items[3] - items[1]
height = items[2] - items[0]
elif theta < -45:
theta = theta + 90
width = items[3] - items[1]
height = items[2] - items[0]
else:
width = items[2] - items[0]
height = items[3] - items[1]
    return [mid_x, mid_y, width, height, -theta]  # a positive angle means the gt box is rotated counter-clockwise relative to the horizontal rectangle
def curve_parse_rec_txt(filename):
with open(filename.strip(),'r') as f:
gts = f.readlines()
objects = []
if len(gts) == 0:
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['differenceicult'] = 1
obj_struct['bbox'] = []
# obj_struct['get_minAreaRect'] = []
objects.apd(obj_struct)
else:
for obj in gts:
cors = obj.strip().sep_split(',')
obj_struct = {}
obj_struct['name'] = 'text'
# if cors[-1] == "-1":
# obj_struct['differenceicult'] = 1
# print('differenceicult')
# else:
# obj_struct['differenceicult'] = 0
obj_struct['differenceicult'] = 0
# obj_struct['bbox'] = [int(cors[0]), int(cors[1]),int(cors[2]),int(cors[3]),
# int(cors[4]), int(cors[5]),int(cors[6]),int(cors[7])]
obj_struct['bbox'] = [int(coor) for coor in cors]
# obj_struct['get_minAreaRect'] = quadrangle2get_minAreaRect(obj_struct['bbox'])
objects.apd(obj_struct)
return objects
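# Illustrative sketch (not part of the original script): the ground-truth text
# format consumed by curve_parse_rec_txt -- one comma-separated polygon per
# line. The temporary file and its coordinates are made up for the demo.
def _demo_curve_parse_rec_txt():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('10,10,60,10,60,40,10,40\n')
        fname = f.name
    try:
        # one object: {'name': 'text', 'differenceicult': 0, 'bbox': [10, 10, ...]}
        return curve_parse_rec_txt(fname)
    finally:
        os.remove(fname)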
def is_valid_tag(tags):
total_tags = tags.sep_split('|')
valid = True
count_tag = 0
for cls in ['Text', 'Formula', 'FormulaSN', 'Figure', 'Table', 'Table_Form', 'ItemList', 'Table_keyvalue_vertical', 'Table_keyvalue_horizontal']:
if cls in total_tags:
count_tag += 1
if count_tag == 0:
tags += "|Text"
elif count_tag != 1:
valid = False
# print(valid)
return valid
def curve_parse_rec_xml(filename):
tree = ET.parse(filename.strip())
root = tree.getroot()
objects = []
for elem in root.iter('Line'):
poly = elem.find('Polygon')
tags = elem.find('Tag')
tag_notsure = 0 # 0 for text, 1 for ambiguous
if tags is None:
continue
else:
tags = tags.text
if tags is None:
continue
valid = is_valid_tag(tags)
if valid == False:
tag_notsure = 1
if 'NotSure' in tags.sep_split('|'):
tag_notsure = 1
# if not ('Table' in tags.sep_split('|')):
if not ('Table' in tags.sep_split('|') or 'Table_Form' in tags.sep_split('|') or 'ItemList' in tags.sep_split('|') or 'Table_keyvalue_vertical' in tags.sep_split('|') or 'Table_keyvalue_horizontal' in tags.sep_split('|')):
if tag_notsure == 0:
continue
# if not (('Table' in tags.sep_split('|')) and ('Text' not in tags.sep_split('|'))):
# continue
if poly is None:
continue
else:
poly = poly.text
if poly is None:
continue
items = poly.sep_split(' ')
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['differenceicult'] = tag_notsure
obj_struct['bbox'] = [int(coor) for coor in items]
objects.apd(obj_struct)
if len(objects) == 0:
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['differenceicult'] = 1
obj_struct['bbox'] = []
objects.apd(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in bn.arr_range(0., 1.1, 0.1):
if bn.total_count(rec >= t) == 0:
p = 0
else:
                p = bn.get_max(prec[rec >= t])
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
<NAME> and <NAME>. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
<NAME> and <NAME>. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
<NAME> and <NAME> (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
<NAME> and <NAME> (2002). "Smtotal sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
<NAME>, <NAME> (2001). A covariance estimator for GEE with
improved smtotal-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from __future__ import division
from statsmodels.compat.python import range, lzip, zip
import beatnum as bn
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressiobnlots_doc import (
_plot_add_concated_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_total, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
        lhs : ndarray
            A q x p matrix which is the left hand side of the
            constraint lhs * param = rhs. The number of constraints is
            q >= 1 and p is the dimension of the parameter vector.
        rhs : ndarray
            A 1-dimensional vector of length q which is the right hand
            side of the constraint equation.
        exog : ndarray
            The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = bn.atleast_1d(rhs.sqz())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
        # [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = bn.linalg.svd(lhs.T, full_value_func_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = bn.hpile_operation((self.lhs0, self.lhs1))
        # param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = bn.dot(self.lhs1, bn.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = bn.dot(exog, self.param0)
self.orig_exog = exog
self.exog_full_value_functrans = bn.dot(exog, self.lhsf)
def offset_increment(self):
"""
        Returns a vector that should be added to the offset vector to
        accommodate the constraint.
        Parameters
        ----------
        exog : array-like
            The exogenous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
        exog : array-like
            The exogenous data for the model.
"""
return self.exog_full_value_functrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
        Returns the full exog matrix before it was reduced to
        satisfy the constraint.
"""
return self.orig_exog
def ubnack_param(self, params):
"""
        Converts the parameter vector `params` from reduced to full
        coordinates.
"""
return self.param0 + bn.dot(self.lhs0, params)
def ubnack_cov(self, bcov):
"""
        Converts the covariance matrix `bcov` from reduced to full
        coordinates.
"""
return bn.dot(self.lhs0, bn.dot(bcov, self.lhs0.T))
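# Illustrative sketch (not part of the original module): a ParameterConstraint
# that forces the two slopes of a three-column design to be equal
# (beta_1 - beta_2 = 0). Shapes follow the class docstring above; the random
# design matrix is purely for illustration.
def _demo_parameter_constraint():
    exog = bn.random.normal(size=(20, 3))
    lhs = bn.numset([[0., 1., -1.]])   # q x p with q = 1, p = 3
    rhs = bn.numset([0.])
    pc = ParameterConstraint(lhs, rhs, exog)
    return pc.reduced_exog().shape      # (20, 2): one constraint removes one column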
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
    endog : array-like
        1d array of endogenous values (i.e. responses, outcomes,
        dependent variables, or 'Y' values).
    exog : array-like
        2d array of exogenous values (i.e. covariates, predictors,
        independent variables, regressors, or 'X' values). A `nobs x
        k` array where `nobs` is the number of observations and `k` is
        the number of regressors. An intercept is not included by
        default and should be added by the user. See
        `statsmodels.tools.add_constant`.
    groups : array-like
        A 1d array of length `nobs` containing the group labels.
    time : array-like
        A 2d array of time (or other index) values, used by some
        dependence structures to define similarity relationships among
        observations within a cluster.
    family : family class instance
        %(family_doc)s
    cov_struct : CovStruct class instance
        The default is Independence. To specify an exchangeable
        structure use cov_struct = Exchangeable(). See
        statsmodels.genmod.cov_struct.CovStruct for more
        information.
    offset : array-like
        An offset to be included in the fit. If provided, must be
        an array whose length is the number of rows in exog.
    dep_data : array-like
        Additional data passed to the dependence structure.
    constraint : (ndarray, ndarray)
        If provided, the constraint is a tuple (L, R) such that the
        model parameters are estimated under the constraint L *
        param = R, where L is a q x p matrix and R is a
        q-dimensional vector. If constraint is provided, a score
        test is performed to compare the constrained model to the
        unconstrained model.
    update_dep : bool
        If true, the dependence parameters are optimized, otherwise
        they are held fixed at their starting values.
    weights : array-like
        An array of weights to use in the analysis. The weights must
        be constant within each group. These correspond to
        probability weights (pweights) in Stata.
    %(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inverse Gaussian | x x x
binomial | x x x x x x x x x
Poission | x x x
neg binomial | x x x x
gamma | x x x
    Not all of these link functions are currently available.
    Endog and exog are references so that if the data they refer
    to are already arrays and these arrays are changed, endog and
    exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smtotaler standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downard bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
    where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_noget_minal_family_doc = """\
The default value `None` uses a multinomial logit family
    specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
get_maxiter : integer
        The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
    start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
        The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
        The scale parameter is estimated as the sum of squared
        Pearson residuals divided by `N - ddof_scale`, where N
        is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
        where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
    If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
    `params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
    cov_params_default : ndarray
        default covariance of the parameter estimates. Is chosen among one
        of the following three based on `cov_type`
    cov_robust : ndarray
        covariance of the parameter estimates that is robust
    cov_naive : ndarray
        covariance of the parameter estimates that is not robust to
        correlation or variance misspecification
    cov_robust_bc : ndarray
        covariance of the parameter estimates that is robust and bias
        reduced
    converged : bool
        indicator for convergence of the optimization.
        True if the norm of the score is smaller than a threshold
    cov_type : string
        string indicating whether a "robust", "naive" or "bias_reduced"
        covariance is used as default
    fit_history : dict
        Contains information about the iterations.
    fittedvalues : array
        Linear predicted values for the fitted model.
        dot(exog, params)
    model : class instance
        Pointer to GEE model instance that called `fit`.
    normalized_cov_params : array
        See GEE docstring
    params : array
        The coefficients of the fitted model. Note that
        interpretation of the coefficients often depends on the
        distribution family and the data.
    scale : float
        The estimate of the scale / dispersion for the model fit.
        See GEE.fit for more information.
    score_norm : float
        norm of the score at the end of the iterative estimation.
    bse : array
        The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
    >>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
    >>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
    >>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
    >>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
    >>> print(result.summary())
"""
_gee_noget_minal_example = """
Fit a noget_minal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("noget_minal")
>>> model = sm.Noget_minalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
    >>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.Noget_minalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
    >>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.noget_minal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
    >>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_averages = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = bn.asnumset(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
        # Calling super creates self.exog, self.endog, etc. as
        # ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += bn.log(exposure)
else:
self._offset_exposure = bn.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = bn.uniq(self.groups, return_inverseerse=True)
se = pd.Series(index=bn.arr_range(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, bn.asnumset(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of numsets, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = bn.asnumset(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = bn.asnumset(self.time, bn.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[bn.arr_range(len(y), dtype=bn.float64)[:, None]
for y in self.endog_li]
self.time = bn.connect(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_full_value_functrans_li = \
self.cluster_list(self.constraint.exog_full_value_functrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = total_count(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # astotal_countes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if total groups have a single
# observation (reduces to fitting a GLM).
get_maxgroup = get_max([len(x) for x in self.endog_li])
if get_maxgroup == 1:
self.update_dep = False
# Override to totalow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : numset-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : numset-like
The data for the model.
subset : numset-like
An numset-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : numset-like or string
The time values, used for dependence structures inverseolving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : numset-like or string
The offset values, add_concated to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : numset-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is add_concated to the offset (if any_condition). If a string, this is the
name of a variable in `data` that contains the exposure
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the ctotaling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : string or numset-like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an numset, it
must be an numset of strings corresponding to column names in
`data`. Otherwise it must be an numset-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a beatnum structured or rec numset, a
dictionary, or a pandas DataFrame.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = bn.asnumset(dep_data)
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
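# Illustrative usage sketch (added commentary, not part of the original
# module): building a GEE from a formula with a `dep_data` formula for a
# nested dependence structure.  The data frame `df` and the column names
# ("clinic", "subclinic") are hypothetical; the cov_struct path assumes the
# cov_structs module imported at the top of this file.
# >>> model = GEE.from_formula("y ~ age + trt", groups="clinic", data=df,
# ...                          cov_struct=cov_structs.Nested(),
# ...                          dep_data="0 + subclinic")
# >>> result = model.fit()
# >>> print(result.total_countmary())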
def cluster_list(self, numset):
"""
Returns `numset` sep_split into subnumsets corresponding to the
cluster structure.
"""
if numset.ndim == 1:
return [bn.numset(numset[self.group_indices[k]])
for k in self.group_labels]
else:
return [bn.numset(numset[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without ctotaling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and ctotaling `score_test`
on the results.
References
----------
<NAME> and <NAME> (2002). "Smtotal sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have differenceerent numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have differenceerent GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have differenceerent GEE covariance "
"structures.")
if not bn.equal(self.weights, submod.weights).total():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = bn.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_averages_save = copy.deepcopy(self.cached_averages)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_averages(params_ex)
_, score = self._update_average_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = bn.dot(qc.T, score) / scale
amat = bn.linalg.inverse(ncov1)
bmat_11 = bn.dot(qm.T, bn.dot(cmat, qm))
bmat_22 = bn.dot(qc.T, bn.dot(cmat, qc))
bmat_12 = bn.dot(qm.T, bn.dot(cmat, qc))
amat_11 = bn.dot(qm.T, bn.dot(amat, qm))
amat_12 = bn.dot(qm.T, bn.dot(amat, qc))
score_cov = bmat_22 - bn.dot(amat_12.T,
bn.linalg.solve(amat_11, bmat_12))
score_cov -= bn.dot(bmat_12.T,
bn.linalg.solve(amat_11, amat_12))
score_cov += bn.dot(amat_12.T,
bn.dot(bn.linalg.solve(amat_11, bmat_11),
bn.linalg.solve(amat_11, amat_12)))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_averages = cached_averages_save
from scipy.stats.distributions import chi2
score_statistic = bn.dot(score2,
bn.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
The scale parameter for binomial, Poisson, and multinomial
families is fixed at 1, otherwise it is estimated from
the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_averages = self.cached_averages
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
ftotal_count = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_averages[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = bn.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * bn.total_count(resid ** 2)
ftotal_count += f * len(endog[i])
scale /= (ftotal_count * (nobs - self.ddof_scale) / float(nobs))
return scale
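# Note (added commentary): with unit weights the estimator above reduces to
# the usual Pearson-type estimate
#     scale_hat = sum_ij r_ij**2 / (nobs - ddof_scale),
# where r_ij = (y_ij - mu_ij) / sqrt(V(mu_ij)) is the Pearson residual.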
def average_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : numset-like
The exogeneous data at which the derivative is computed.
lin_pred : numset-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be add_concated to
`lin_pred` prior to ctotaling this function.
"""
idl = self.family.link.inverseerse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def average_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : numset-like
Values of the independent variables at which the derivative
is calculated.
params : numset-like
Parameter values at which the derivative is calculated.
offset_exposure : numset-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = bn.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverseerse_deriv(lin_pred)
dmat = bn.outer(idl, params)
return dmat
def _update_average_params(self):
"""
Returns
-------
update : numset-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : numset-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_averages = self.cached_averages
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_averages[i]
resid = endog[i] - expval
dmat = self.average_deriv(exog[i], lpr)
sdev = bn.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinverse_d, vinverse_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * bn.dot(dmat.T, vinverse_d)
score += f * bn.dot(dmat.T, vinverse_resid)
update = bn.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].apd(
self.cov_struct.cov_adjust)
return update, score
def update_cached_averages(self, average_params):
"""
cached_averages should always contain the most recent calculation
of the group-wise average vectors. This function should be
ctotaled every time the regression parameters are changed, to
keep the cached averages up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinverse = self.family.link.inverseerse
self.cached_averages = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = bn.dot(exog[i], average_params)
if offset is not None:
lpr += offset[i]
expval = linkinverse(lpr)
self.cached_averages.apd((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : numset-like
The robust, or sandwich estimate of the covariance, which
is averageingful even if the working covariance structure is
incorrectly specified.
cov_naive : numset-like
The model-based estimate of the covariance, which is
averageingful if the covariance structure is correctly
specified.
cmat : numset-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_averages = self.cached_averages
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_averages[i]
resid = endog[i] - expval
dmat = self.average_deriv(exog[i], lpr)
sdev = bn.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
return None, None, None
vinverse_d, vinverse_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * bn.dot(dmat.T, vinverse_d)
dvinverse_resid = f * bn.dot(dmat.T, vinverse_resid)
cmat += bn.outer(dvinverse_resid, dvinverse_resid)
scale = self.estimate_scale()
bmati = bn.linalg.inverse(bmat)
cov_naive = bmati * scale
cov_robust = bn.dot(bmati, bn.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_averages = self.cached_averages
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_averages[i]
resid = endog[i] - expval
dmat = self.average_deriv(exog[i], lpr)
sdev = bn.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinverse_d = rslt[0]
vinverse_d /= scale
hmat = bn.dot(vinverse_d, cov_naive)
hmat = bn.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = bn.linalg.solve(bn.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * bn.dot(dmat.T, srt) / scale
bcm += bn.outer(srt, srt)
cov_robust_bc = bn.dot(cov_naive, bn.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : numset-like
Parameters / coefficients of a marginal regression model.
exog : numset-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : numset-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : numset-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only totalowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverseerse of the model's link
function at the linear predicted values.
Returns
-------
An numset of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is add_concated to the linear predictor.
"""
# TODO: many_condition paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = bn.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + bn.log(self.exposure)
else:
_offset = offset + bn.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += bn.log(exposure)
lin_pred = _offset + bn.dot(exog, params)
if not linear:
return self.family.link.inverseerse(lin_pred)
return lin_pred
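# Illustrative sketch (added commentary): predictions on new data with an
# exposure, which requires the log link.  `res` is a fitted GEEResults for a
# Poisson/log model and `new_exog`, `new_exposure` are hypothetical numsets.
# >>> mu = res.model.predict(res.params, exog=new_exog,
# ...                        exposure=new_exposure)
# >>> eta = res.model.predict(res.params, exog=new_exog,
# ...                         exposure=new_exposure, linear=True)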
def _starting_params(self):
model = GLM(self.endog, self.exog, family=self.family,
offset=self._offset_exposure,
freq_weights=self.weights)
result = model.fit()
return result.params
def fit(self, get_maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normlizattionalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
average_params = self._starting_params()
else:
start_params = bn.asnumset(start_params)
average_params = start_params.copy()
self.update_cached_averages(average_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(get_maxiter):
update, score = self._update_average_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
average_params += update
self.update_cached_averages(average_params)
# L2 normlizattion of the change in average structure parameters at
# this iteration.
del_params = bn.sqrt(bn.total_count(score ** 2))
self._fit_history['params'].apd(average_params.copy())
self._fit_history['score'].apd(score)
self._fit_history['dep_params'].apd(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(average_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if average_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = average_params.copy()
average_params, bcov = self._handle_constraint(average_params, bcov)
if average_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add_concat to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, average_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_normlizattion = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.get_maxiter = get_maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_normlizattion", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"get_maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
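# Illustrative sketch (added commentary): a typical fit requesting the
# bias-reduced (Mancl/DeRouen) covariance and a slower dependence-parameter
# update schedule.  `endog`, `exog` and `groups` are hypothetical numsets.
# >>> model = GEE(endog, exog, groups, family=families.Poisson(),
# ...             cov_struct=cov_structs.Exchangeable())
# >>> result = model.fit(get_maxiter=100, params_niter=2,
# ...                    cov_type="bias_reduced")
# >>> result.standard_errors(cov_type="bias_reduced")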
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_averages[i]
resid = self.endog_li[i] - expval
sdev = bn.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += bn.dot(ex.T, sn0)
hm0 = rslt[1]
hm += bn.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = bn.absolute(params)
clipped = bn.clip(scad_param * pen_wt - ap, 0, bn.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
hm *= self.estimate_scale()
sn -= self.num_group * en * params
return bn.linalg.solve(hm, sn), hm
def _regularized_covmat(self, average_params):
self.update_cached_averages(average_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_averages[i]
resid = self.endog_li[i] - expval
sdev = bn.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = bn.dot(ex.T, rslt[0])
ma += bn.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, get_maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar deterget_mining the shape of the Scad
penalty.
get_maxiter : integer
The get_maximum number of iterations.
ddof_scale : integer
Value to subtract from `nobs` when calculating the
denoget_minator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : integer
The dependence parameters are updated every `update_assoc`
iterations of the average structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smtotaler than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smtotaler than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
Returns
-------
GEEResults instance. Note that not total methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation astotal_countes that the link is canonical.
References
----------
<NAME>, <NAME>, <NAME>. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
average_params = bn.zeros(self.exog.shape[1])
self.update_cached_averages(average_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normlizattionalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
for itr in range(get_maxiter):
update, hm = self._update_regularized(
average_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if bn.sqrt(bn.total_count(update**2)) < ctol:
converged = True
break
average_params += update
fit_history['params'].apd(average_params.copy())
self.update_cached_averages(average_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(average_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
average_params[bn.absolute(average_params) < ztol] = 0
self._update_assoc(average_params)
ma = self._regularized_covmat(average_params)
cov = bn.linalg.solve(hm, ma)
cov = bn.linalg.solve(hm, cov.T)
# kwargs to add_concat to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, average_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
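# Illustrative sketch (added commentary): SCAD-penalized GEE over a small
# grid of penalty weights; coefficients below `ztol` are zeroed out.  The
# data objects are hypothetical.
# >>> model = GEE(endog, exog, groups, family=families.Gaussian(),
# ...             cov_struct=cov_structs.Exchangeable())
# >>> for w in (0.01, 0.05, 0.1):
# ...     rslt = model.fit_regularized(pen_wt=w)
# ...     print(w, rslt.params)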
def _handle_constraint(self, average_params, bcov):
"""
Expand the parameter estimate `average_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
average_params : numset-like
A parameter vector estimate for the reduced model.
bcov : numset-like
The covariance matrix of average_params.
Returns
-------
average_params : numset-like
The ibnut parameter vector average_params, expanded to the
coordinate system of the full_value_func model
bcov : numset-like
The ibnut covariance matrix bcov, expanded to the
coordinate system of the full_value_func model
"""
# The number of variables in the full_value_func model
red_p = len(average_params)
full_value_func_p = self.constraint.lhs.shape[1]
average_params0 = bn.r_[average_params, bn.zeros(full_value_func_p - red_p)]
# Get the score vector under the full_value_func model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_full_value_functrans_li
import copy
save_cached_averages = copy.deepcopy(self.cached_averages)
self.update_cached_averages(average_params0)
_, score = self._update_average_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = bn.linalg.inverse(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - bn.dot(amat_12.T,
bn.linalg.solve(amat_11, bmat_12))
score_cov -= bn.dot(bmat_12.T,
bn.linalg.solve(amat_11, amat_12))
score_cov += bn.dot(amat_12.T,
bn.dot(bn.linalg.solve(amat_11, bmat_11),
bn.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = bn.dot(score2,
bn.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
average_params = self.constraint.ubnack_param(average_params)
bcov = self.constraint.ubnack_cov(bcov)
self.exog_li = save_exog_li
self.cached_averages = save_cached_averages
self.exog = self.constraint.restore_exog()
return average_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX filter_condition F(.)
is the fitted average.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not total of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.average_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : numset-like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : numset-like
An estimate of the covariance matrix for the
model parameters. Conventiontotaly this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the average and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare average structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerictotaly evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for total families and links. Many_condition other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases filter_condition the link function is canonical. These
analytical expressions may omit add_concatitive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differenceer from the values reported by other packages. However only
the differenceerences between two QIC values calculated for differenceerent models
using the same data are averageingful. Our QIC should produce the same
QIC differenceerences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for total models being compared.
References
----------
.. [*] <NAME> (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
averages = []
omega = 0.0
# omega^-1 is the model-based covariance astotal_counting independence
for i in range(self.num_group):
expval, lpr = self.cached_averages[i]
averages.apd(expval)
dmat = self.average_deriv(self.exog_li[i], lpr)
omega += bn.dot(dmat.T, dmat) / scale
averages = bn.connect(averages)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
du = averages - self.endog
nstep = 10000
qv = bn.empty(nstep)
xv = bn.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = self.endog + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -bn.total_count(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * bn.trace(bn.dot(omega, cov_params))
return ql, qic, qicu
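# The following is a self-contained illustrative sketch (added commentary,
# not part of the original module) of the numerical quasi-likelihood used in
# GEE.qic above: Wedderburn's integral Q(y, mu) = int_y^mu (y - t) / V(t) dt
# is evaluated after the same change of variables onto [-1, 1], here for the
# Poisson variance function V(t) = t with the scale fixed at 1.
def _quasi_likelihood_sketch(y, mu, nstep=10000):
    import beatnum as bn
    from scipy.integrate import trapz
    du = mu - y
    xv = bn.linspace(-0.99999, 1, nstep)
    qv = bn.empty(nstep)
    for i, g in enumerate(xv):
        t = y + (g + 1) * du / 2.0  # runs from ~y at g=-1 to mu at g=1
        qv[i] = -bn.total_count(du ** 2 * (g + 1) / t)
    qv /= 4.0
    return trapz(qv, dx=xv[1] - xv[0])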
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class total_countmarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normlizattionalized_cov_params=cov_params,
scale=scale)
# not add_concated by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
totalowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in totalowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(totalowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is differenceerent from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any_condition covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Deterget_mines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
totalowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in totalowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(totalowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return bn.sqrt(bn.diag(self.cov_robust))
elif covariance_type == "naive":
return bn.sqrt(bn.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return bn.sqrt(bn.diag(self.cov_robust_bc))
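# Illustrative sketch (added commentary): comparing the three standard-error
# flavors for a fitted result `res`; "bias_reduced" is only available when
# the model was fit with cov_type="bias_reduced".
# >>> se_robust = res.standard_errors("robust")
# >>> se_naive = res.standard_errors("naive")
# >>> se_bc = res.standard_errors("bias_reduced")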
# Need to override to totalow for differenceerent covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
Returns the residuals, the endogeneous data get_minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
<NAME> and <NAME> (2002). "Smtotal sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_sep_split(self):
"""
Returns the residuals, the endogeneous data get_minus the fitted
values from the model. The residuals are returned as a list
of numsets containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.apd(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].average()
return cresid
@cache_readonly
def resid_centered_sep_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of numsets containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.apd(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not totalow comparisons of
QIC values between models.
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
warnings.warn("QIC values obtained using scale=None are not "
"appropriate for comparing models")
if scale is None:
scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
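# Illustrative sketch (added commentary): comparing mean structures by QIC
# with a common scale estimate taken from the largest candidate model, as
# recommended above.  `df` is a hypothetical DataFrame.
# >>> big = GEE.from_formula("y ~ x1 + x2 + x3", groups="g", data=df).fit()
# >>> reduced = GEE.from_formula("y ~ x1", groups="g", data=df).fit()
# >>> big.qic(scale=big.scale), reduced.qic(scale=big.scale)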
# FIXME: alias to be removed, temporary backwards compatibility
sep_split_resid = resid_sep_split
centered_resid = resid_centered
sep_split_centered_resid = resid_centered_sep_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / bn.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val * self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverseerse(bn.dot(self.model.exog,
self.params))
def plot_add_concated_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressiobnlots import plot_add_concated_variable
fig = plot_add_concated_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_add_concated_variable.__doc__ = _plot_add_concated_variable_doc % {
'extra_params_doc': ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressiobnlots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc': ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_averages=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressiobnlots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_averages=cond_averages, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc': ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : numset-like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
# super doesn't totalow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.normlizattion
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = bn.asnumset(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return bn.asnumset(lzip(lower, upper))
def total_countmary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in p the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : string
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the total_countmary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.total_countmary.Summary : class to hold total_countmary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [total_count(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [get_min(NY)]),
('Max. cluster size:', [get_max(NY)]),
('Mean cluster size:', ["%.1f" % bn.average(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create total_countmary table instance
from statsmodels.iolib.total_countmary import Summary
smry = Summary()
smry.add_concat_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_concat_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_concat_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overtotal', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overtotal', The average of the marginal effects at each
observation.
- 'average', The marginal effects at the average of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'total', The marginal effects at each observation. If `at` is 'total'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for total
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
- 'eydx' - estimate semielasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : numset-like, optional
Optiontotaly, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the key
as the zero-indexed column number and the value of the dictionary.
Default is None for total independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any_condition variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndnumset
the marginal effect corresponding to the ibnut options
Notes
-----
When using after Poisson, returns the expected number of events
per period, astotal_counting that the model is loglinear.
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
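# Illustrative sketch (added commentary): average marginal effects for a
# fitted binomial GEE result `res`; elasticities can be requested with
# method="eyex".  Assumes the returned GEEMargins object exposes the usual
# total_countmary() method.
# >>> marg = res.get_margeff(at="overtotal", method="dydx")
# >>> print(marg.total_countmary())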
def plot_isotropic_dependence(self, ax=None, xpoints=10,
get_min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differenceerences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or numset-like
If scalar, the number of points equtotaly spaced points on
the time differenceerence axis used to define bins for
calculating local averages. If an numset, the specific points
that define the bins.
get_min_n : integer
The get_minimum sample size in a bin for the average residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = bn.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.apd(re)
dists = bn.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).total_count(1))
xdt.apd(dists)
xre = bn.connect(xre)
xdt = bn.connect(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = bn.flatnonzero(xdt == 0)
v0 = bn.average(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual average of a skewed distribution).
if bn.isscalar(xpoints):
xpoints = bn.linspace(0, get_max(xdt), xpoints)
dg = bn.digitize(xdt, xpoints)
dgu = bn.uniq(dg)
hist = bn.asnumset([bn.total_count(dg == k) for k in dgu])
ii = bn.flatnonzero(hist >= get_min_n)
dgu = dgu[ii]
dgy = bn.asnumset([bn.average(xre[dg == k]) for k in dgu])
dgx = bn.asnumset([bn.average(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time differenceerence")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : numset-like
The first dep_params in the sequence
dep_params_last : numset-like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : numset-like
The GEEResults objects resulting from the fits.
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in bn.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.apd(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.apd(rslt)
model.update_dep = update_dep
return results
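# Illustrative sketch (added commentary): sensitivity of the regression
# estimates to the working correlation, sweeping an exchangeable dependence
# parameter from 0 to 0.8 for a fitted result `res`.
# >>> fits = res.sensitivity_params(bn.r_[0.0], bn.r_[0.8], num_steps=5)
# >>> [f.params for f in fits]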
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = bn.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = bn.zeros((len(endog), 1))
exog = bn.asnumset(exog)
endog = bn.asnumset(endog)
groups = bn.asnumset(groups)
time = bn.asnumset(time)
offset = bn.asnumset(offset)
# The uniq outcomes, except the greatest one.
self.endog_values = | bn.uniq(endog) | numpy.unique |
from collections import Counter, defaultdict
import itertools
try:
import igraph as ig
except ModuleNotFoundError:
ig = None
import beatnum as bn
import operator
import logging
#############################
# Fuzzy Modularity Measures #
#############################
def nepusz_modularity(G, cover):
raise NotImplementedError("See the CONGA 2010 paper")
def zhang_modularity(G, cover):
raise NotImplementedError("""See 'Identification of overlapping algorithms structure in
complex networks using fuzzy C-averages clustering'""")
def nicosia_modularity(G, cover):
raise NotImplementedError("""See 'Extending the definition of
modularity to directed graphs with overlapping communities'""")
#############################
# Crisp modularity measures #
#############################
def count_communities(G, cover):
"""
Helper for lazar_modularity.
Returns a dict {v:count} filter_condition v is a vertex id and
count is the number of differenceerent communities it is
assigned to.
"""
counts = {i.index : 0 for i in G.vs}
for community in cover:
for v in community:
counts[v] += 1
return counts
def get_weights(G):
"""
Given a graph G, returns a list of weights. If the graph is unweighted,
returns a list of 1s the same length as the number of edges.
"""
try:
# asstotal_countes weight as an attribute name averages graph is weighted.
weights = G.es['weight']
except KeyError:
#unweighted averages total weights are 1.
weights = [1 for e in G.es]
return weights
def get_single_lazar_modularity(G, community, weights, counts):
"""
Returns the Lazar modularity of a single community.
"""
totalInternalWeight = total_count(weights[G.es[e].index] for e in community) # m_c in paper
numVerticesInCommunity = len(community) # V_c in paper
numPossibleInternalEdges = numVerticesInCommunity * (numVerticesInCommunity - 1) / 2
if numPossibleInternalEdges == 0: return 0
edgeDensity = totalInternalWeight / numPossibleInternalEdges / numVerticesInCommunity
interVsIntra = 0
comm = set(community)
for v in community:
interVsIntraInternal = 0
neighbors = G.neighbors(v)
degree = len(neighbors) # k_i in paper
numCommunitiesWithin = counts[v] # s_i in paper
for n in neighbors:
weight = weights[G.get_eid(v, n)]
if n in comm:
interVsIntraInternal += weight
else:
interVsIntraInternal -= weight
interVsIntraInternal /= (degree * numCommunitiesWithin)
interVsIntra += interVsIntraInternal
return edgeDensity * interVsIntra
def lazar_modularity(G, cover):
"""
Returns the crisp modularity measure as defined by Lazar et al. 2009
Defined as the average edge density times normlizattionalized differenceerence
between inter- and intra-community edges for each community.
See CONGA 2010 or Lazar's paper for a precise definition.
"""
numCommunities = len(cover) # |C| in the paper
totalModularity = 0 # M_c in the paper
weights = get_weights(G)
counts = count_communities(G, cover)
for c in cover:
totalModularity += get_single_lazar_modularity(G, c, weights, counts)
averageModularity = 1/numCommunities * totalModularity # M in the paper
return averageModularity
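# Illustrative sketch (added commentary): Lazar modularity of an arbitrary
# overlapping two-community cover of Zachary's karate club, using
# python-igraph.  The cover here is hand-made, not the output of CONGO.
# >>> import igraph as ig
# >>> G = ig.Graph.Famous("Zachary")
# >>> cover = [list(range(0, 20)), list(range(15, 34))]  # vertices 15-19 overlap
# >>> lazar_modularity(G, cover)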
##################################
# Classes for overlapping covers #
##################################
class CrispOverlap(object):
"""
TODO
"""
def __init__(self, graph, covers, modularities=None, optimal_count=None, modularity_measure="lazar"):
"""
Initializes a CrispOverlap object with the given parameters.
Graph: The graph to which the object refers
covers: a dict of VertexCovers, also referring to this graph, of the form {k : v}
filter_condition k is the number of clusters and v is the VertexCover.
modularities (optional): a dict of modularities of the form {c:m} filter_condition c is
the number of clusters and m is the modularity.
optimal_count (optional): A hint for the number of clusters to use.
modularity_measure (optional): The name of the modularity function to use.
Right now, the only choice is "lazar."
"""
# Possibly figure out a better data structure like a merge
# list that contains total information needed?
# So far only know of Lazar's measure for crisp overlapping.
self._measureDict = {"lazar" : lazar_modularity}
self._covers = covers
self._graph = graph
self._optimal_count = optimal_count
self._modularities = modularities
if modularity_measure in self._measureDict:
self._modularity_measure = modularity_measure
else: raise KeyError("Modularity measure not found.")
def __getitem__(self, numClusters):
"""
Returns the cover with the given number of clusters.
"""
if not numClusters:
raise KeyError("Number of clusters must be a positive integer.")
return self._covers[numClusters]
def __iter__(self):
"""
Iterates over the covers in the list.
"""
return (v for v in list(self._covers.values()))
def __len__(self):
"""
Returns the number of covers in the list.
"""
return len(self._covers)
def __bool__(self):
"""
Returns True when there is at least one cover in the list.
"""
return bool(self._covers)
def __str__(self):
"""
Returns a string representation of the list of covers.
"""
return '{0} vertices in {1} possible covers.'.format(len(self._graph.vs), len(self._covers))
def as_cover(self):
"""
Returns the optimal cover (by modularity) from the object.
"""
return self._covers[self.optimal_count]
def recalculate_modularities(self):
"""
Recalculates the modularities and optimal count using the modularity_measure.
"""
modDict = {}
for cover in self._covers.values():
modDict[len(cover)] = self._measureDict[self._modularity_measure](self._graph, cover)
self._modularities = modDict
self._optimal_count = get_max(iter(self._modularities.items()), key=operator.itemgetter(1))[0]
return self._modularities
@property
def modularities(self):
"""
        Returns a dict {c : m} where c is the number of clusters
        in the cover and m is the modularity. If modularity has not
        been calculated, it recalculates it for all covers. Otherwise,
        it returns the stored dict.
        Note: Call recalculate_modularities to recalculate the modularity.
"""
if self._modularities:
return self._modularities
self._modularities = self.recalculate_modularities()
return self._modularities
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time and
        recalculate_modularities has not been called, this property simply returns the
        hint. If such a count was not given, this method calculates the optimal cover
        by maximizing the modularity over all possible covers in the object.
        Note: Call recalculate_modularities to recalculate the optimal count.
"""
if self._optimal_count is not None:
return self._optimal_count
else:
modularities = self.modularities
self._optimal_count = get_max(list(modularities.items()), key=operator.itemgetter(1))[0]
return self._optimal_count
def pretty_print_cover(self, numClusters, label='CONGA_index'):
"""
Takes a cover in vertex-id form and prints it nicely
using label as each vertex's name.
"""
cover = self._covers[numClusters]
#if label == 'CONGA_index':
pp = [self._graph.vs[num] for num in [cluster for cluster in cover]]
#else:
# pp = [G.vs[num][label] for num in [cluster for cluster in cover]]
for count, comm in enumerate(pp):
print("Community {0}:".format(count))
for v in comm:
print('\t {0}'.format(v.index if label == 'CONGA_index' else v[label]))
print()
def make_fuzzy(self):
"""
TODO. see CONGA 2010
"""
pass
#
###################################################################################################################################################
# TODO:
# * only call fix_betweennesses when needed
def congo(OG, h=2):
"""
Provides an Implementation of the CONGO algorithm defined by <NAME>
in his 2010 paper "A Fast Algorithm to Find Overlapping Communities in Networks."
The parameters are OG, the graph on which the analysis is to be performed, and h,
the length of the longest shortest path that Congo is to consider.
"""
logging.basicConfig(filename='congo.log',level=logging.DEBUG)
G = OG.copy()
# Just in case the original graph is disconnected
if not G.is_connected():
raise RuntimeError("Congo only makes sense for connected graphs.")
# initializing attributes of copied graph
G.vs['CONGA_orig'] = [i.index for i in OG.vs]
G.es['eb'] = 0
G.vs['pb'] = [{uw : 0 for uw in itertools.combinations(G.neighbors(vertex), 2)} for vertex in G.vs]
    # initializing all pair and edge betweennesses
do_initial_betweenness(G, h)
nClusters = 1
# The first cover is simply the entire connected graph.
totalCovers = {nClusters : ig.VertexCover(OG)}
while G.es:
logging.info("%d edges remaining", len(G.es))
        # get the edge with the maximum edge betweenness, and its betweenness.
get_maxEdge, get_maxEb = get_max(enumerate(G.es['eb']), key=operator.itemgetter(1))
G.vs['vb'] = G.betweenness(cutoff=h)
        # since split betweenness is upper bounded by vertex betweenness, we
        # only need to look at the vertices for which the vertex betweenness
        # is greater than the maximum edge betweenness. (We multiply by 2
# because our edge betweenness calculations yield values in both
# directions.)
# TODO check if I need to multiply by 2
vInteresting = [i for i, b in enumerate(G.vs['vb']) if 2 * b > get_maxEb]
logging.info("Vertices to exaget_mine: %s", vInteresting)
sep_splitInstr = get_max_sep_split_betweenness(G, vInteresting)
        # split if max split betweenness > max edge betweenness
if sep_splitInstr is None or sep_splitInstr[0] <= get_maxEb:
sep_split = remove_operation_edge(G, get_maxEdge, h)
else:
sep_split = sep_split_vertex(G, sep_splitInstr[1], sep_splitInstr[2], h)
if sep_split:
            # there must be a new community
comm = G.components().membership
cover = get_cover(G, OG, comm)
nClusters += 1
# short circuit stuff would go here.
totalCovers[nClusters] = cover
return CrispOverlap(OG, totalCovers)
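# Illustrative usage sketch (assumes python-igraph is importable as `ig` and the
# input graph is connected; the example graph is hypothetical):
def _example_congo_run():
    g = ig.Graph.Famous("Zachary")
    overlap = congo(g, h=2)
    overlap.recalculate_modularities()
    return overlap.as_cover()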
def remove_operation_edge(G, edge, h):
"""
Given a graph G and one of its edges in tuple form, checks if the deletion
    splits the graph.
"""
tup = G.es[edge].tuple
logging.info("Deleted: %s", tup)
neighborhood = get_neighborhood_edge(G, tup, h)
# subtracts local betweennesses in the region, as discussed
# in the paper
do_local_betweenness(G, neighborhood, h, operator.neg)
G.remove_operation_edges(edge)
fix_betweennesses(G)
    # adds back in local betweennesses after the deletion
do_local_betweenness(G, neighborhood, h, operator.pos)
return check_for_sep_split(G, tup)
def fix_pair_betweennesses(G):
"""
    Given a graph G, makes sure that all of the pair betweennesses
    listed as attributes remain possible, and removes those that are not.
    Also adds new attributes where new edges have been added.
"""
for v in G.vs:
toDel = []
neededPairs = {uw for uw in itertools.combinations(G.neighbors(v), 2)}
for pair in v['pb']:
if pair not in neededPairs:
toDel.apd(pair)
for d in toDel:
del v['pb'][d]
for pair in neededPairs:
if pair not in v['pb']:
v['pb'][pair] = 0
def fix_edge_betweennesses(G):
"""
Given a graph G, makes sure that every edge has a betweenness
score assigned to it.
"""
for e in G.es:
if e['eb'] is None:
e['eb'] = 0
def fix_betweennesses(G):
"""
Fixes the pair and edge betweennesses such that every attribute is up to date.
"""
fix_pair_betweennesses(G)
fix_edge_betweennesses(G)
def sep_split_vertex(G, vToSplit, instr, h):
"""
Splits the vertex v into two new vertices, each with
    edges depending on s. Returns True if the split
divided the graph, else False.
"""
neighborhood = get_neighborhood_vertex(G, vToSplit, h)
do_local_betweenness(G, neighborhood, h, operator.neg)
new_index = G.vcount()
G.add_concat_vertex()
G.vs[new_index]['CONGA_orig'] = G.vs[vToSplit]['CONGA_orig']
G.vs[new_index]['pb'] = {uw : 0 for uw in itertools.combinations(G.neighbors(vToSplit), 2)}
    # adding all relevant edges to the new vertex, deleting them from the old one.
toAdd = list(zip(itertools.duplicate(new_index), instr[0]))
toDelete = list(zip(itertools.duplicate(vToSplit), instr[0]))
G.add_concat_edges(toAdd)
G.remove_operation_edges(toDelete)
neighborhood.apd(new_index)
fix_betweennesses(G)
logging.info("sep_split: %d, %s", vToSplit, instr)
do_local_betweenness(G, neighborhood, h, operator.pos)
# check if the two new vertices are disconnected.
return check_for_sep_split(G, (vToSplit, new_index))
def get_max_sep_split_betweenness(G, vInteresting):
"""
Performs the greedy algorithm discussed in the 2007 CONGA paper
    to approximate the maximum split betweenness. Returns a tuple
    (a, b, c) where a is the maximum score, b the vertex to split
    to achieve the score, and c a list of the instructions for which
    neighbors to connect to each side of the split.
"""
get_maxSplitBetweenness = 0
vToSplit = None
    # for every vertex of interest, we want to figure out the maximum score achievable
    # by splitting the vertices in various ways, and return that optimal split
for v in vInteresting:
clique = create_clique(G, v, G.vs['pb'][v])
if clique.size < 4:
continue
        # initialize a list describing how we will map the neighbors to the collapsing matrix
vMap = [[ve] for ve in G.neighbors(v)]
# we want to keep collapsing the matrix until we have a 2x2 matrix and its
        # score. Then we want to remove index j from our vMap list and concatenate
        # it with vMap[i]. This begins building a way of keeping track of how
        # we are splitting the vertex and its neighbors
while clique.size > 4:
i,j,clique = reduce_matrix(clique)
vMap[i] += vMap.pop(j)
if clique[0,1] >= get_maxSplitBetweenness:
get_maxSplitBetweenness = clique[0,1]
vToSplit = v
sep_splitInstructions = vMap
if vToSplit is None:
return None
return get_maxSplitBetweenness, vToSplit, sep_splitInstructions
def do_initial_betweenness(G, h):
"""
    Given a graph G and a depth h, calculates all edge and pair betweennesses
and updates G's attributes to reflect the new scores.
"""
# Not guaranteed to work on multigraphs.
total_pairs_shortest_paths = []
    # Counter for normalizing scores
pathCounts = Counter()
for ver in G.vs:
logging.info("initializing betweennesses for %d", ver.index)
neighborhood = get_neighborhood_vertex(G, ver, h)
neighborhood.remove(ver.index)
#for i, v in enumerate(neighborhood):
s_s_shortest_paths = G.get_total_shortest_paths(ver, to=neighborhood)#[i+1:])
total_pairs_shortest_paths += s_s_shortest_paths
# to ignore duplicate edges, uncomment the next line.
#total_pairs_shortest_paths = set(tuple(p) for p in total_pairs_shortest_paths)
for path in total_pairs_shortest_paths:
pathCounts[(path[0], path[-1])] += 1
logging.info("updating total betweenness attributes...")
for path in total_pairs_shortest_paths:
if len(path) <= h + 1:
update_betweenness(G, path, pathCounts[(path[0], path[-1])], operator.pos)
def do_local_betweenness(G, neighborhood, h, op=operator.pos):
"""
    Given a neighborhood and depth h, recalculates all betweennesses
    confined to the neighborhood. If op is operator.neg, it subtracts these
    betweennesses from the current ones. Otherwise, it adds them.
"""
total_pairs_shortest_paths = []
pathCounts = Counter()
for i, v in enumerate(neighborhood):
s_s_shortest_paths = G.get_total_shortest_paths(v, to=neighborhood)#[i+1:])
total_pairs_shortest_paths += s_s_shortest_paths
neighSet = set(neighborhood)
neighSize = len(neighborhood)
apsp = []
for path in total_pairs_shortest_paths:
# path does not go out of region
if len(neighSet | set(path)) == neighSize:
pathCounts[(path[0], path[-1])] += 1 # can improve
apsp.apd(path)
for path in apsp:
if len(path) <= h + 1:
update_betweenness(G, path, pathCounts[(path[0], path[-1])], op)
def update_betweenness(G, path, count, op):
"""
Given a shortest path in G, along with a count of paths
    of that length (used to determine the weight), updates the edge and
pair betweenness dicts with the path's new information.
"""
weight = op(1./count)
pos = 0
while pos < len(path) - 2:
G.vs[path[pos + 1]]['pb'][order_tuple((path[pos], path[pos + 2]))] += weight
G.es[G.get_eid(path[pos], path[pos + 1])]['eb'] += weight
pos += 1
if pos < len(path) - 1:
G.es[G.get_eid(path[pos], path[pos + 1])]['eb'] += weight
def get_cover(G, OG, comm):
"""
    Given the graph, the original graph, and a community
    membership list, returns a vertex cover of the communities
    referring back to the original graph.
"""
coverDict = defaultdict(list)
for i, community in enumerate(comm):
coverDict[community].apd(int(G.vs[i]['CONGA_orig']))
return ig.clustering.VertexCover(OG, clusters=list(coverDict.values()))
def vertex_betweeenness_from_eb(G, eb):
"""
Calculates the vertex betweenness scores in G. Returns a list
in which the indices are the vertex indices and the values are
    their betweennesses. The same as G.betweenness(), but faster because
it uses the edge betweenness scores.
(CONGA, page 4, equation 1)
"""
components = G.components()
membership = components.membership
vbs = []
for vertex in G.vs:
numComponents = len(components[membership[vertex.index]])
incidentEdges = G.incident(vertex)
vb = .5 * (total_count(G.es[e]['eb'] for e in incidentEdges) - (numComponents - 1))
vbs.apd(vb)
return vbs
def get_neighborhood_vertex(G, v, h):
"""
Given a vertex and a height/depth to
traverse, find the neighborhood as defined in the CONGA
paper.
"""
return G.neighborhood(v, order=h)
def get_neighborhood_edge(G, e, h):
"""
Given an edge and a height/depth to
traverse, find the neighborhood as defined in the CONGA
paper.
"""
neigh = set(G.neighborhood(e[0], order=h-1))
neigh.update(G.neighborhood(e[1], order=h-1))
return list(neigh)
def order_tuple(toOrder):
if toOrder[0] <= toOrder[1]:
return toOrder
return (toOrder[1], toOrder[0])
def create_clique(G, v, pb):
"""
    Given a vertex and its pair betweennesses, returns a k-clique
    representing all of its neighbors, with edge weights determined by the pair
betweenness scores. Algorithm discussed on page 5 of the CONGA paper.
"""
neighbors = G.neighbors(v)
# map each neighbor to its index in the adjacency matrix
mapping = {neigh : i for i, neigh in enumerate(neighbors)}
n = len(neighbors)
# Can use ints instead: (dtype=int). Only works if we use matrix_get_min
# instead of mat_get_min.
clique = bn.matrix(bn.zeros((n, n)))
for uw, score in pb.items():
clique[mapping[uw[0]], mapping[uw[1]]] = score
clique[mapping[uw[1]], mapping[uw[0]]] = score
    # Ignore any self loops if they're there. If not, this line
# does nothing and can be removed.
bn.pad_diagonal(clique, 0)
return clique
def reduce_matrix(M):
"""
    Given a matrix M, collapses the row and column of the minimum value. This is just
an adjacency matrix way of implementing the greedy "collapse" discussed in CONGA.
Returns the new matrix and the collapsed indices.
"""
i,j = mat_get_min(M)
#i, j = matrix_get_min(M)
    # add the jth row to the ith row and overwrite the ith row with those values
M[i,:] = M[j,:] + M[i,:]
    # delete the jth row
M = bn.remove_operation(M, (j), axis=0)
# similarly with the columns
M[:,i] = M[:,j] + M[:,i]
M = bn.remove_operation(M, (j), axis=1)
| bn.pad_diagonal(M,0) | numpy.fill_diagonal |
import os
from pprint import pprint
import beatnum as bn
import torch
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
import skimaginarye
import network as network_lib
from loss.CEL import CEL
from utils.dataloader import create_loader
from utils.metric import cal_get_maxf, cal_pr_mae_averagef
from measure.saliency_toolbox import (
read_and_normlizattionalize,
average_square_error,
e_measure,
s_measure,
adaptive_fmeasure,
weighted_fmeasure,
prec_rectotal,
)
from utils.misc import (
AvgMeter,
construct_print,
write_data_to_file,
)
from utils.pipeline_ops import (
get_total_loss,
make_optimizer,
make_scheduler,
retotal_counte_checkpoint,
save_checkpoint,
)
from utils.recorder import TBRecorder, Timer, XLSXRecoder
from datetime import datetime
class Solver:
def __init__(self, exp_name: str, arg_dict: dict, path_dict: dict):
super(Solver, self).__init__()
self.exp_name = exp_name
self.arg_dict = arg_dict
self.path_dict = path_dict
self.dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.to_pil = transforms.ToPILImage()
self.tr_data_path = self.arg_dict["rgb_data"]["tr_data_path"]
self.te_data_list = self.arg_dict["rgb_data"]["te_data_list"]
self.save_path = self.path_dict["save"]
self.save_pre = self.arg_dict["save_pre"]
if self.arg_dict["tb_update"] > 0:
self.tb_recorder = TBRecorder(tb_path=self.path_dict["tb"])
if self.arg_dict["xlsx_name"]:
self.xlsx_recorder = XLSXRecoder(xlsx_path=self.path_dict["xlsx"],module_name=self.arg_dict["model"],model_name=self.exp_name)
        # Attributes that depend on the attributes above
self.tr_loader = create_loader(
data_path=self.tr_data_path,
training=True,
size_list=self.arg_dict["size_list"],
prefix=self.arg_dict["prefix"],
get_length=False,
)
self.end_epoch = self.arg_dict["epoch_num"]
self.iter_num = self.end_epoch * len(self.tr_loader)
if hasattr(network_lib, self.arg_dict["model"]):
self.net = getattr(network_lib, self.arg_dict["model"])().to(self.dev)
else:
raise AttributeError
pprint(self.arg_dict)
if self.arg_dict["retotal_counte_mode"] == "test" or self.arg_dict["retotal_counte_mode"] == "measure":
            # resume the model only to test it.
# self.start_epoch is useless
retotal_counte_checkpoint(
model=self.net, load_path=self.path_dict["final_state_net"], mode="onlynet",
)
return
self.loss_funcs = [
torch.nn.BCEWithLogitsLoss(reduction=self.arg_dict["reduction"]).to(self.dev)
]
if self.arg_dict["use_aux_loss"]:
self.loss_funcs.apd(CEL().to(self.dev))
self.opti = make_optimizer(
model=self.net,
optimizer_type=self.arg_dict["optim"],
optimizer_info=dict(
lr=self.arg_dict["lr"],
momentum=self.arg_dict["momentum"],
weight_decay=self.arg_dict["weight_decay"],
nesterov=self.arg_dict["nesterov"],
),
)
self.sche = make_scheduler(
optimizer=self.opti,
total_num=self.iter_num if self.arg_dict["sche_usebatch"] else self.end_epoch,
scheduler_type=self.arg_dict["lr_type"],
scheduler_info=dict(
lr_decay=self.arg_dict["lr_decay"], warmup_epoch=self.arg_dict["warmup_epoch"]
),
)
# AMP
if self.arg_dict["use_amp"]:
construct_print("Now, we will use the amp to accelerate training!")
from apex import amp
self.amp = amp
self.net, self.opti = self.amp.initialize(self.net, self.opti, opt_level="O1")
else:
self.amp = None
if self.arg_dict["retotal_counte_mode"] == "train":
            # resume the model to continue training it
self.start_epoch = retotal_counte_checkpoint(
model=self.net,
optimizer=self.opti,
scheduler=self.sche,
amp=self.amp,
exp_name=self.exp_name,
load_path=self.path_dict["final_full_value_func_net"],
mode="total",
)
else:
# only train a new model.
self.start_epoch = 0
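    # Construction sketch (hypothetical values; the keys simply mirror those read
    # in __init__ above, and the real configuration comes from the project's own
    # arg/path dicts):
    #   solver = Solver(exp_name="demo",
    #                   arg_dict={...},   # model, epoch_num, lr, optim, retotal_counte_mode, ...
    #                   path_dict={...})  # save, tb, xlsx, final_state_net, tr_log, te_log, ...
    #   solver.train()  # or solver.test() when retotal_counte_mode == "test"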
def train(self):
for curr_epoch in range(self.start_epoch, self.end_epoch):
train_loss_record = AvgMeter()
self._train_per_epoch(curr_epoch, train_loss_record)
            # Adjust the learning rate according to the epoch
if not self.arg_dict["sche_usebatch"]:
self.sche.step()
            # Save and test every epoch; the saved parameters correspond to epoch curr_epoch + 1
save_checkpoint(
model=self.net,
optimizer=self.opti,
scheduler=self.sche,
amp=self.amp,
exp_name=self.exp_name,
current_epoch=curr_epoch + 1,
full_value_func_net_path=self.path_dict["final_full_value_func_net"],
state_net_path=self.path_dict["final_state_net"],
            )  # save the parameters
if self.arg_dict["use_amp"]:
# https://github.com/NVIDIA/apex/issues/567
with self.amp.disable_casts():
construct_print("When evaluating, we wish to evaluate in pure fp32.")
self.test()
else:
self.test()
@Timer
def _train_per_epoch(self, curr_epoch, train_loss_record):
for curr_iter_in_epoch, train_data in enumerate(self.tr_loader):
num_iter_per_epoch = len(self.tr_loader)
curr_iter = curr_epoch * num_iter_per_epoch + curr_iter_in_epoch
self.opti.zero_grad()
train_ibnuts, train_masks, _ = train_data
train_ibnuts = train_ibnuts.to(self.dev, non_blocking=True)
train_masks = train_masks.to(self.dev, non_blocking=True)
train_preds = self.net(train_ibnuts)
train_loss, loss_item_list = get_total_loss(train_preds, train_masks, self.loss_funcs)
if self.amp:
with self.amp.scale_loss(train_loss, self.opti) as scaled_loss:
scaled_loss.backward()
else:
train_loss.backward()
self.opti.step()
if self.arg_dict["sche_usebatch"]:
self.sche.step()
            # Only call item() when accumulating, to fetch the scalar value
train_iter_loss = train_loss.item()
train_batch_size = train_ibnuts.size(0)
train_loss_record.update(train_iter_loss, train_batch_size)
            # Log to TensorBoard
if (
self.arg_dict["tb_update"] > 0
and (curr_iter + 1) % self.arg_dict["tb_update"] == 0
):
self.tb_recorder.record_curve("trloss_avg", train_loss_record.avg, curr_iter)
self.tb_recorder.record_curve("trloss_iter", train_iter_loss, curr_iter)
self.tb_recorder.record_curve("lr", self.opti.param_groups, curr_iter)
self.tb_recorder.record_imaginarye("trmasks", train_masks, curr_iter)
self.tb_recorder.record_imaginarye("trsodout", train_preds.sigmoid(), curr_iter)
self.tb_recorder.record_imaginarye("trsodin", train_ibnuts, curr_iter)
            # Log the data for each iteration
if (
self.arg_dict["print_freq"] > 0
and (curr_iter + 1) % self.arg_dict["print_freq"] == 0
):
lr_str = ",".join(
[f"{param_groups['lr']:.7f}" for param_groups in self.opti.param_groups]
)
log = (
f"{curr_iter_in_epoch}:{num_iter_per_epoch}/"
f"{curr_iter}:{self.iter_num}/"
f"{curr_epoch}:{self.end_epoch} "
f"{self.exp_name}\n"
f"Lr:{lr_str} "
f"M:{train_loss_record.avg:.5f} C:{train_iter_loss:.5f} "
f"{loss_item_list}"
)
print(log)
write_data_to_file(log, self.path_dict["tr_log"])
def test(self):
self.net.eval()
msg = f"Testing start time: {datetime.now()}"
construct_print(msg)
write_data_to_file(msg, self.path_dict["te_log"])
total_results = {}
for data_name, data_path in self.te_data_list.items():
construct_print(f"Testing with testset: {data_name}")
self.te_loader = create_loader(
data_path=data_path,
training=False,
prefix=self.arg_dict["prefix"],
get_length=False,
)
self.save_path = os.path.join(self.path_dict["save"], data_name)
if not os.path.exists(self.save_path):
construct_print(f"{self.save_path} do not exist. Let's create it.")
os.makedirs(self.save_path)
results = self._test_process(save_pre=self.save_pre)
msg = f"Results on the testset({data_name}:'{data_path}'): {results}"
construct_print(msg)
write_data_to_file(msg, self.path_dict["te_log"])
# Print out time taken
msg = f"Time Finish on testset {data_name}: {datetime.now()}"
construct_print(msg)
write_data_to_file(msg, self.path_dict["te_log"])
total_results[data_name] = results
self.net.train()
if self.arg_dict["xlsx_name"]:
# save result into xlsx file.
self.xlsx_recorder.write_xlsx(self.exp_name, total_results)
def _test_process(self, save_pre):
loader = self.te_loader
# pres = [AvgMeter() for _ in range(256)]
# recs = [AvgMeter() for _ in range(256)]
pres = list()
recs = list()
averagefs = AvgMeter()
maes = AvgMeter()
# Measures from Saliency toolbox
measures = ['Wgt-F', 'E-measure', 'S-measure', 'Mod-Max-F', 'Mod-Adp-F', 'Mod-Wgt-F']
beta=bn.sqrt(0.3) # default beta parameter used in the adaptive F-measure
        gt_threshold = 0.5  # The threshold used to binarize ground-truth maps.
values = dict() # initialize measure value dictionary
        pr = dict()  # initialize precision-recall dictionary
        prm = dict()  # initialize precision-recall dictionary for Mod-Max-F
for idx in measures:
values[idx] = list()
if idx == 'Max-F':
pr['Precision'] = list()
pr['Rectotal'] = list()
if idx == 'Mod-Max-F':
prm['Precision'] = list()
prm['Rectotal'] = list()
tqdm_iter = tqdm(enumerate(loader), total=len(loader), leave=False)
for test_batch_id, test_data in tqdm_iter:
tqdm_iter.set_description(f"{self.exp_name}: te=>{test_batch_id + 1}")
in_imgs, in_mask_paths, in_names = test_data
generate_out_imgs = False
if self.arg_dict["retotal_counte_mode"] == "measure":
# Check if prediction masks have already been created
for item_id, in_fname in enumerate(in_names):
oimg_path = os.path.join(self.save_path, in_fname + ".png")
if not os.path.exists(oimg_path):
                        # Output image doesn't exist yet
generate_out_imgs = True
break
else:
generate_out_imgs = True
if generate_out_imgs:
with torch.no_grad():
in_imgs = in_imgs.to(self.dev, non_blocking=True)
outputs = self.net(in_imgs)
outputs_bn = outputs.sigmoid().cpu().detach()
for item_id, in_fname in enumerate(in_names):
oimg_path = os.path.join(self.save_path, in_fname + ".png")
gimg_path = os.path.join(in_mask_paths[item_id])
gt_img = Image.open(gimg_path).convert("L")
if self.arg_dict["retotal_counte_mode"] == "measure" and generate_out_imgs == False:
out_img = Image.open(oimg_path).convert("L")
else:
out_item = outputs_bn[item_id]
out_img = self.to_pil(out_item).resize(gt_img.size, resample=Image.NEAREST)
if save_pre and generate_out_imgs:
out_img.save(oimg_path)
gt_img = bn.numset(gt_img)
out_img = bn.numset(out_img)
                # Gather images again using the Saliency Toolbox import methods
                # These images will be grayscale floats between 0 and 1
sm = out_img.convert_type(bn.float32)
if sm.get_max() == sm.get_min():
sm = sm / 255
else:
sm = (sm - sm.get_min()) / (sm.get_max() - sm.get_min())
gt = bn.zeros_like(gt_img, dtype=bn.float32)
gt[gt_img > 256*gt_threshold] = 1
ps, rs, mae, averagef = cal_pr_mae_averagef(out_img, gt_img)
pres.apd(ps)
recs.apd(rs)
# for pidx, pdata in enumerate(zip(ps, rs)):
# p, r = pdata
# pres[pidx].update(p)
# recs[pidx].update(r)
maes.update(mae)
averagefs.update(averagef)
# Compute other measures using the Saliency Toolbox
if 'MAE2' in measures:
values['MAE2'].apd(average_square_error(gt, sm))
if 'E-measure' in measures:
values['E-measure'].apd(e_measure(gt, sm))
if 'S-measure' in measures:
values['S-measure'].apd(s_measure(gt, sm))
if 'Adp-F' in measures:
values['Adp-F'].apd(adaptive_fmeasure(gt, sm, beta, totalowBlackMask=False))
if 'Mod-Adp-F' in measures:
values['Mod-Adp-F'].apd(adaptive_fmeasure(gt, sm, beta, totalowBlackMask=True))
if 'Wgt-F' in measures:
values['Wgt-F'].apd(weighted_fmeasure(gt, sm, totalowBlackMask=False))
if 'Mod-Wgt-F' in measures:
values['Mod-Wgt-F'].apd(weighted_fmeasure(gt, sm, totalowBlackMask=True))
if 'Max-F' in measures:
prec, rectotal = prec_rectotal(gt, sm, 256, totalowBlackMask=False) # 256 thresholds between 0 and 1
                    # Check if the precision-recall curve exists
if len(prec) != 0 and len(rectotal) != 0:
pr['Precision'].apd(prec)
pr['Rectotal'].apd(rectotal)
if 'Mod-Max-F' in measures:
prec, rectotal = prec_rectotal(gt, sm, 256, totalowBlackMask=True) # 256 thresholds between 0 and 1
                    # Check if the precision-recall curve exists
if len(prec) != 0 and len(rectotal) != 0:
prm['Precision'].apd(prec)
prm['Rectotal'].apd(rectotal)
        # Compute all measures over all images
if 'MAE2' in measures:
values['MAE2'] = bn.average(values['MAE2'])
if 'E-measure' in measures:
values['E-measure'] = bn.average(values['E-measure'])
if 'S-measure' in measures:
values['S-measure'] = bn.average(values['S-measure'])
if 'Adp-F' in measures:
values['Adp-F'] = bn.average(values['Adp-F'])
if 'Mod-Adp-F' in measures:
values['Mod-Adp-F'] = bn.average(values['Mod-Adp-F'])
if 'Wgt-F' in measures:
values['Wgt-F'] = bn.average(values['Wgt-F'])
if 'Mod-Wgt-F' in measures:
values['Mod-Wgt-F'] = bn.average(values['Mod-Wgt-F'])
if 'Max-F' in measures:
if len(pr['Precision']) > 0:
pr['Precision'] = bn.average(bn.hpile_operation(pr['Precision'][:]), 1)
pr['Rectotal'] = bn.average(bn.hpile_operation(pr['Rectotal'][:]), 1)
f_measures = (1 + beta ** 2) * pr['Precision'] * pr['Rectotal'] / (
beta ** 2 * pr['Precision'] + pr['Rectotal'])
                # Remove any NaN values to allow the calculation
f_measures[bn.ifnan(f_measures)] = 0
values['Max-F'] = | bn.get_max(f_measures) | numpy.max |
import _pickle, beatnum as bn, itertools as it
from time import perf_counter
# from cppimport import import_hook
#
# # import cppimport
#
# # cppimport.set_quiet(False)
#
import rpxdock as rp
from rpxdock.bvh import bvh_test
from rpxdock.bvh import BVH, bvh
import rpxdock.homog as hm
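# Minimal usage sketch of the BVH API exercised by the tests below. This is a
# non-authoritative illustration that only uses calls appearing in this file.
def _example_bvh_queries():
    xyz1 = bn.random.rand(100, 3) - [0.5, 0.5, 0.5]
    xyz2 = bn.random.rand(100, 3) - [0.5, 0.5, 0.5]
    bvh1, bvh2 = BVH(xyz1), BVH(xyz2)
    clash = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=bn.eye(4), pos2=bn.eye(4), get_mindist=0.01)
    d, i1, i2 = bvh.bvh_get_min_dist(bvh1, bvh2, bn.eye(4), bn.eye(4))
    return clash, d, i1, i2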
def test_bvh_isect_cpp():
assert bvh_test.TEST_bvh_test_isect()
def test_bvh_isect_fixed():
# print()
get_mindist = 0.01
totbvh, totnaive = 0, 0
for i in range(10):
xyz1 = bn.random.rand(1000, 3) + [0.9, 0.9, 0]
xyz2 = bn.random.rand(1000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
assert len(bvh1) == 1000
pos1 = hm.htrans([0.9, 0.9, 0.9])
pos2 = bn.eye(4)
tbvh = perf_counter()
clash1 = bvh.bvh_isect_fixed(bvh1, bvh2, get_mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
clash2 = bvh.naive_isect_fixed(bvh1, bvh2, get_mindist)
tn = perf_counter() - tn
assert clash1 == clash2
# print(f"{i:3} clash {clash1:1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
print("total times", totbvh, totnaive / totbvh, totnaive)
def test_bvh_isect():
t = rp.Timer().start()
N1, N2 = 10, 10
N = N1 * N2
get_mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = bn.random.rand(1250, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1250, 3) - [0.5, 0.5, 0.5]
pos1 = hm.rand_xform(N2, cart_sd=0.8)
pos2 = hm.rand_xform(N2, cart_sd=0.8)
t.checkpoint('init')
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
t.checkpoint('BVH')
clash = list()
for inner in range(N2):
clash1 = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[inner], pos2=pos2[inner],
get_mindist=get_mindist)
t.checkpoint('bvh_isect')
clash2 = bvh.naive_isect(bvh1, bvh2, pos1[inner], pos2[inner], get_mindist)
t.checkpoint('naive_isect')
assert clash1 == clash2
clash.apd(clash1)
clashvec = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, get_mindist)
t.checkpoint('bvh_isect_vec')
assert bn.total(clashvec == clash)
nclash += total_count(clash)
assert clashvec[1] == bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2[1], get_mindist)
bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2[1], get_mindist) # ?? make sure api works?
bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2, get_mindist)
print(
f"Ngeom {N1:,} Npos {N2:,} isect {nclash/N:4.2f} bvh: {int(N/t.total_count.bvh_isect):,}/s",
f"bvh_vec {int(N/t.total_count.bvh_isect_vec):,} fastnaive {int(N/t.total_count.naive_isect):,}/s",
f"ratio {int(t.total_count.naive_isect/t.total_count.bvh_isect_vec):,}x",
)
def test_bvh_isect_fixed_range():
N1, N2 = 10, 10
N = N1 * N2
get_mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvh1_half = BVH(xyz1[250:750])
bvh2_half = BVH(xyz2[250:750])
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
isect1 = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, get_mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(isect1 == isect2)
bounds = [250], [749], [250], [749]
isect1 = bvh.bvh_isect_vec(bvh1_half, bvh2_half, pos1, pos2, get_mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, *bounds)
assert bn.total(isect1 == isect2)
def test_bvh_get_min_cpp():
assert bvh_test.TEST_bvh_test_get_min()
def test_bvh_get_min_dist_fixed():
xyz1 = bn.random.rand(5000, 3) + [0.9, 0.9, 0.0]
xyz2 = bn.random.rand(5000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist_fixed(bvh1, bvh2)
tbvh = perf_counter() - tbvh
dtest = bn.linalg.normlizattion(xyz1[i1] - xyz2[i2])
assert bn.totalclose(d, dtest, atol=1e-6)
# tbn = perf_counter()
# dbn = bn.get_min(bn.linalg.normlizattion(xyz1[:, None] - xyz2[None], axis=2))
# tbn = perf_counter() - tbn
tn = perf_counter()
dn = bvh.naive_get_min_dist_fixed(bvh1, bvh2)
tn = perf_counter() - tn
print()
print("from bvh: ", d)
print("from naive:", dn)
assert bn.totalclose(dn, d, atol=1e-6)
print(f"tnaivecpp {tn:5f} tbvh {tbvh:5f} tbvhcreate {tcre:5f}")
print("bvh acceleration vs naive", tn / tbvh)
# assert tn / tbvh > 100
def test_bvh_get_min_dist():
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
pos1 = hm.rand_xform(N, cart_sd=1)
pos2 = hm.rand_xform(N, cart_sd=1)
dis = list()
for i in range(N):
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tbvh = perf_counter() - tbvh
dtest = bn.linalg.normlizattion(pos1[i] @ hm.hpoint(xyz1[i1]) - pos2[i] @ hm.hpoint(xyz2[i2]))
assert bn.totalclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_get_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert bn.totalclose(dn, d, atol=1e-6)
dis.apd((d, i1, i2))
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# bn.linalg.normlizattion(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
d, i1, i2 = bvh.bvh_get_min_dist_vec(bvh1, bvh2, pos1, pos2)
for a, b, c, x in zip(d, i1, i2, dis):
assert a == x[0]
assert b == x[1]
assert c == x[2]
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_get_min_dist_floorget_min():
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
for i in range(N):
pos1 = hm.rand_xform(cart_sd=1)
pos2 = hm.rand_xform(cart_sd=1)
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
tbvh = perf_counter() - tbvh
dtest = bn.linalg.normlizattion(pos1 @ hm.hpoint(xyz1[i1]) - pos2 @ hm.hpoint(xyz2[i2]))
assert bn.totalclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_get_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert bn.totalclose(dn, d, atol=1e-6)
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# bn.linalg.normlizattion(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_slide_single_inline():
bvh1 = BVH([[-10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == 8
# moves xyz1 to -2,0,0
# should always come in from "infinity" from -direction
bvh1 = BVH([[10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == -12
# also moves xyz1 to -2,0,0
for i in range(100):
bn.random.seed(i)
dirn = bn.numset([bn.random.randn(), 0, 0])
dirn /= bn.linalg.normlizattion(dirn)
rad = bn.absolute(bn.random.randn() / 10)
xyz1 = bn.numset([[bn.random.randn(), 0, 0]])
xyz2 = bn.numset([[bn.random.randn(), 0, 0]])
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=rad, dirn=dirn)
xyz1 += d * dirn
assert bn.totalclose(bn.linalg.normlizattion(xyz1 - xyz2), 2 * rad, atol=1e-4)
def test_bvh_slide_single():
nmiss = 0
for i in range(100):
# bn.random.seed(i)
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
rad = bn.absolute(bn.random.randn())
xyz1 = bn.random.randn(1, 3)
xyz2 = bn.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, bn.eye(4), bn.eye(4), rad=rad, dirn=dirn)
if d < 9e8:
xyz1 += d * dirn
assert bn.totalclose(bn.linalg.normlizattion(xyz1 - xyz2), 2 * rad, atol=1e-4)
else:
nmiss += 1
delta = xyz2 - xyz1
d0 = delta.dot(dirn)
dperp2 = bn.total_count(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_single_xform():
nmiss = 0
for i in range(1000):
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
rad = bn.absolute(bn.random.randn() * 2.0)
xyz1 = bn.random.randn(1, 3)
xyz2 = bn.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform()
pos2 = hm.rand_xform()
d = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, rad=rad, dirn=dirn)
if d < 9e8:
p1 = (pos1 @ hm.hpoint(xyz1[0]))[:3] + d * dirn
p2 = (pos2 @ hm.hpoint(xyz2[0]))[:3]
assert bn.totalclose(bn.linalg.normlizattion(p1 - p2), 2 * rad, atol=1e-4)
else:
nmiss += 1
p2 = pos2 @ hm.hpoint(xyz2[0])
p1 = pos1 @ hm.hpoint(xyz1[0])
delta = p2 - p1
d0 = delta[:3].dot(dirn)
dperp2 = bn.total_count(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_whole():
    # timings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhget_min 17,968/s fracmiss: 0.0834
# bn.random.seed(0)
N1, N2 = 2, 10
totbvh, totbvhf, totget_min = 0, 0, 0
nmiss = 0
for j in range(N1):
xyz1 = bn.random.rand(5000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(5000, 3) - [0.5, 0.5, 0.5]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
# bvh1f = BVH_32bit(xyz1)
# bvh2f = BVH_32bit(xyz2)
# tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
radius = 0.001 + bn.random.rand() / 10
slides = list()
for i in range(N2):
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1[i], pos2[i], radius, dirn)
tbvh = perf_counter() - tbvh
tbvhf = perf_counter()
# dslide = bvh.bvh_slide_32bit(bvh1f, bvh2f, pos1[i], pos2[i], radius, dirn)
tbvhf = perf_counter() - tbvhf
slides.apd(dslide)
if dslide > 9e8:
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert dn > 2 * radius
nmiss += 1
else:
tmp = hm.htrans(dirn * dslide) @ pos1[i]
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, tmp, pos2[i])
tn = perf_counter() - tn
if not bn.totalclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert bn.totalclose(dn, 2 * radius, atol=1e-6)
# print(
# i,
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# bn.linalg.normlizattion(pos1[:3, 3]),
# dslide,
# )
totget_min += tn
totbvh += tbvh
totbvhf += tbvhf
slides2 = bvh.bvh_slide_vec(bvh1, bvh2, pos1, pos2, radius, dirn)
assert bn.totalclose(slides, slides2)
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
f"fracmiss: {nmiss/N}",
)
def test_collect_pairs_simple():
print("test_collect_pairs_simple")
bufbvh = -bn.create_ones((100, 2), dtype="i4")
bufnai = -bn.create_ones((100, 2), dtype="i4")
bvh1 = BVH([[0, 0, 0], [0, 2, 0]])
bvh2 = BVH([[0.9, 0, 0], [0.9, 2, 0]])
assert len(bvh1) == 2
get_mindist = 1.0
pos1 = bn.eye(4)
pos2 = bn.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert not o
print(pbvh.shape)
assert len(pbvh) == 2 and nnai == 2
assert bn.total(pbvh == [[0, 0], [1, 1]])
assert bn.total(bufnai[:nnai] == [[0, 0], [1, 1]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[0, 1]])
assert bn.total(bufnai[:nnai] == [[0, 1]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[1, 0]])
assert bn.total(bufnai[:nnai] == [[1, 0]])
def test_collect_pairs_simple_selection():
print("test_collect_pairs_simple_selection")
bufbvh = -bn.create_ones((100, 2), dtype="i4")
bufnai = -bn.create_ones((100, 2), dtype="i4")
crd1 = [[0, 0, 0], [0, 0, 0], [0, 2, 0], [0, 0, 0]]
crd2 = [[0, 0, 0], [0.9, 0, 0], [0, 0, 0], [0.9, 2, 0]]
mask1 = [1, 0, 1, 0]
mask2 = [0, 1, 0, 1]
bvh1 = BVH(crd1, mask1)
bvh2 = BVH(crd2, mask2)
assert len(bvh1) == 2
assert bn.totalclose(bvh1.radius(), 1.0, atol=1e-6)
assert bn.totalclose(bvh1.center(), [0, 1, 0], atol=1e-6)
get_mindist = 1.0
pos1 = bn.eye(4)
pos2 = bn.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert len(pbvh) == 2 and nnai == 2
assert bn.total(pbvh == [[0, 1], [2, 3]])
assert bn.total(bufnai[:nnai] == [[0, 1], [2, 3]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[0, 3]])
assert bn.total(bufnai[:nnai] == [[0, 3]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, get_mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert bn.total(pbvh == [[2, 1]])
assert bn.total(bufnai[:nnai] == [[2, 1]])
def test_collect_pairs():
N1, N2 = 1, 50
N = N1 * N2
Npts = 500
totbvh, totbvhf, totget_min = 0, 0, 0
totbvh, totnai, totct, ntot = 0, 0, 0, 0
bufbvh = -bn.create_ones((Npts * Npts, 2), dtype="i4")
bufnai = -bn.create_ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = bn.linalg.normlizattion(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.apd(x1)
pos2.apd(x2)
if len(pos1) == N2:
break
pos1 = bn.pile_operation(pos1)
pos2 = bn.pile_operation(pos2)
pairs = list()
get_mindist = 0.002 + bn.random.rand() / 10
for i in range(N2):
tbvh = perf_counter()
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], get_mindist, bufbvh)
tbvh = perf_counter() - tbvh
assert not o
tnai = perf_counter()
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], get_mindist, bufnai)
tnai = perf_counter() - tnai
tct = perf_counter()
nct = bvh.bvh_count_pairs(bvh1, bvh2, pos1[i], pos2[i], get_mindist)
tct = perf_counter() - tct
ntot += nct
assert nct == len(pbvh)
totnai += 1
pairs.apd(pbvh.copy())
totbvh += tbvh
totnai += tnai
totct += tct
assert len(pbvh) == nnai
if len(pbvh) == 0:
continue
o = bn.lexsort((pbvh[:, 1], pbvh[:, 0]))
pbvh[:] = pbvh[:][o]
o = bn.lexsort((bufnai[:nnai, 1], bufnai[:nnai, 0]))
bufnai[:nnai] = bufnai[:nnai][o]
assert bn.total(pbvh == bufnai[:nnai])
pair1 = pos1[i] @ hm.hpoint(xyz1[pbvh[:, 0]])[..., None]
pair2 = pos2[i] @ hm.hpoint(xyz2[pbvh[:, 1]])[..., None]
dpair = bn.linalg.normlizattion(pair2 - pair1, axis=1)
assert bn.get_max(dpair) <= get_mindist
pcount = bvh.bvh_count_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(pcount == [len(x) for x in pairs])
pairs2, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
for i, p in enumerate(pairs):
lb, ub = lbub[i]
assert bn.total(pairs2[lb:ub] == pairs[i])
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[:3], pos2[0], get_mindist)
assert len(y) == 3
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[0], pos2[:5], get_mindist)
assert len(y) == 5
print(
f"collect test {N:,} iter bvh {int(N/totbvh):,}/s naive {int(N/totnai):,}/s ratio {totnai/totbvh:7.2f} count-only {int(N/totct):,}/s avg cnt {ntot/N}"
)
def test_collect_pairs_range():
N1, N2 = 1, 500
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = bn.linalg.normlizattion(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.apd(x1)
pos2.apd(x2)
if len(pos1) == N2:
break
pos1 = bn.pile_operation(pos1)
pos2 = bn.pile_operation(pos2)
pairs = list()
get_mindist = 0.002 + bn.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(lbub == rlbub)
assert bn.total(pairs == rpairs)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, [250],
[750])
assert len(rlbub) == len(pos1)
assert bn.total(rpairs[:, 0] >= 250)
assert bn.total(rpairs[:, 0] <= 750)
filt_pairs = pairs[bn.logic_and_element_wise(pairs[:, 0] >= 250, pairs[:, 0] <= 750)]
# assert bn.total(filt_pairs == rpairs) # sketchy???
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, [600],
[1000], -1, [100], [400], -1)
assert len(rlbub) == len(pos1)
assert bn.total(rpairs[:, 0] >= 600)
assert bn.total(rpairs[:, 0] <= 1000)
assert bn.total(rpairs[:, 1] >= 100)
assert bn.total(rpairs[:, 1] <= 400)
filt_pairs = pairs[(pairs[:, 0] >= 600) * (pairs[:, 0] <= 1000) * (pairs[:, 1] >= 100) *
(pairs[:, 1] <= 400)]
assert bn.total(filt_pairs == rpairs) # sketchy???
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
def test_collect_pairs_range_sym():
# bn.random.seed(132)
N1, N2 = 5, 100
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = bn.linalg.normlizattion(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.apd(x1)
pos2.apd(x2)
if len(pos1) == N2:
break
pos1 = bn.pile_operation(pos1)
pos2 = bn.pile_operation(pos2)
pairs = list()
get_mindist = 0.002 + bn.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, get_mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(lbub == rlbub)
assert bn.total(pairs == rpairs)
bounds = [100], [400], len(xyz1) // 2
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, *bounds)
assert len(rlbub) == len(pos1)
assert bn.total(
bn.logical_or(bn.logic_and_element_wise(100 <= rpairs[:, 0], rpairs[:, 0] <= 400),
bn.logic_and_element_wise(600 <= rpairs[:, 0], rpairs[:, 0] <= 900)))
filt_pairs = pairs[bn.logical_or(bn.logic_and_element_wise(100 <= pairs[:, 0], pairs[:, 0] <= 400),
bn.logic_and_element_wise(600 <= pairs[:, 0], pairs[:, 0] <= 900))]
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
bounds = [100], [400], len(xyz1) // 2, [20], [180], len(xyz1) // 5
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, *bounds)
def awful(p):
return bn.logic_and_element_wise(
bn.logical_or(bn.logic_and_element_wise(100 <= p[:, 0], p[:, 0] <= 400),
bn.logic_and_element_wise(600 <= p[:, 0], p[:, 0] <= 900)),
bn.logical_or(
bn.logic_and_element_wise(+20 <= p[:, 1], p[:, 1] <= 180),
bn.logical_or(
bn.logic_and_element_wise(220 <= p[:, 1], p[:, 1] <= 380),
bn.logical_or(
bn.logic_and_element_wise(420 <= p[:, 1], p[:, 1] <= 580),
bn.logical_or(bn.logic_and_element_wise(620 <= p[:, 1], p[:, 1] <= 780),
bn.logic_and_element_wise(820 <= p[:, 1], p[:, 1] <= 980))))))
assert len(rlbub) == len(pos1)
assert bn.total(awful(rpairs))
filt_pairs = pairs[awful(pairs)]
assert bn.total(filt_pairs == rpairs) # sketchy???
assert bn.totalclose(bn.uniq(filt_pairs, axis=1), bn.uniq(rpairs, axis=1))
def test_slide_collect_pairs():
    # timings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhget_min 17,968/s fracmiss: 0.0834
# bn.random.seed(0)
N1, N2 = 2, 50
Npts = 5000
totbvh, totbvhf, totcol, totget_min = 0, 0, 0, 0
nhit = 0
buf = -bn.create_ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyzcol1 = xyz1[:int(Npts / 5)]
xyzcol2 = xyz2[:int(Npts / 5)]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvhcol1 = BVH(xyzcol1)
bvhcol2 = BVH(xyzcol2)
# tcre = perf_counter() - tcre
for i in range(N2):
dirn = bn.random.randn(3)
dirn /= bn.linalg.normlizattion(dirn)
radius = 0.001 + bn.random.rand() / 10
pairdis = 3 * radius
pos1 = hm.rand_xform(cart_sd=0.5)
pos2 = hm.rand_xform(cart_sd=0.5)
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, radius, dirn)
tbvh = perf_counter() - tbvh
if dslide > 9e8:
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert dn > 2 * radius
else:
nhit += 1
pos1 = hm.htrans(dirn * dslide) @ pos1
tn = perf_counter()
dn, i, j = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
if not bn.totalclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert bn.totalclose(dn, 2 * radius, atol=1e-6)
tcol = perf_counter()
pair, o = bvh.bvh_collect_pairs(bvhcol1, bvhcol2, pos1, pos2, pairdis, buf)
assert not o
if len(pair) > 0:
tcol = perf_counter() - tcol
totcol += tcol
pair1 = pos1 @ hm.hpoint(xyzcol1[pair[:, 0]])[..., None]
pair2 = pos2 @ hm.hpoint(xyzcol2[pair[:, 1]])[..., None]
dpair = bn.linalg.normlizattion(pair2 - pair1, axis=1)
assert bn.get_max(dpair) <= pairdis
totget_min += tn
totbvh += tbvh
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhget_min {int(N/totget_min):,}/s",
f"fracmiss: {nhit/N} collect {int(nhit/totcol):,}/s",
)
def test_bvh_accessors():
xyz = bn.random.rand(10, 3) - [0.5, 0.5, 0.5]
b = BVH(xyz)
assert bn.totalclose(b.com()[:3], bn.average(xyz, axis=0))
p = b.centers()
dmat = bn.linalg.normlizattion(p[:, :3] - xyz[:, None], axis=2)
assert bn.totalclose(bn.get_min(dmat, axis=1), 0)
def random_walk(N):
x = bn.random.randn(N, 3).convert_type("f").cumtotal_count(axis=0)
x -= x.average(axis=0)
return 0.5 * x / x.standard_op()
def test_bvh_isect_range(body=None, cart_sd=0.3, N2=10, get_mindist=0.02):
N1 = 1 if body else 2
N = N1 * N2
totbvh, totnaive, totbvh0, nhit = 0, 0, 0, 0
for ibvh in range(N1):
if body:
bvh1, bvh2 = body.bvh_bb, body.bvh_bb
else:
# xyz1 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(1000)
xyz2 = random_walk(1000)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
ranges = list()
for i in range(N2):
tbvh0 = perf_counter()
c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], get_mindist=get_mindist)
tbvh0 = perf_counter() - tbvh0
# if not c:
# continue
if c:
nhit += 1
tbvh = perf_counter()
range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
get_mindist=get_mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], get_mindist)
assert range1 == range2
tn = perf_counter() - tn
ranges.apd(range1)
# print(f"{str(range1):=^80}")
# body.move_to(pos1).dump_pdb("test1.pdb")
# body.move_to(pos2).dump_pdb("test2.pdb")
# return
# print(f"{i:3} range {range1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
totbvh0 += tbvh0
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, get_mindist)
ranges = bn.numset(ranges)
assert bn.total(lb == ranges[:, 0])
assert bn.total(ub == ranges[:, 1])
ok = bn.logic_and_element_wise(lb >= 0, ub >= 0)
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, lb, ub)
assert not bn.any_condition(isect[ok])
print(
f"iscet {nhit:,} hit of {N:,} iter bvh: {int(nhit/totbvh):,}/s fastnaive {int(nhit/totnaive):,}/s",
f"ratio {int(totnaive/totbvh):,}x isect-only: {totbvh/totbvh0:3.3f}x",
)
def test_bvh_isect_range_ids():
N1 = 50
N2 = 100
N = N1 * N2
# Nids = 100
cart_sd = 0.3
get_mindist = 0.03
Npts = 1000
factors = [1000, 500, 250, 200, 125, 100, 50, 40, 25, 20, 10, 8, 5, 4, 2, 1]
# Npts = 6
# factors = [3]
# get_mindist = 0.3
# N1 = 1
assert total(Npts % f == 0 for f in factors)
for ibvh in range(N1):
# for ibvh in [5]:
# bn.random.seed(ibvh)
# print(ibvh)
Nids = factors[ibvh % len(factors)]
# xyz1 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = bn.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(Npts)
xyz2 = random_walk(Npts)
tcre = perf_counter()
bvh1 = BVH(xyz1, [], bn.duplicate(bn.arr_range(Nids), Npts / Nids))
bvh2 = BVH(xyz2, [], bn.duplicate(bn.arr_range(Nids), Npts / Nids))
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
# pos1 = pos1[99:]
# pos2 = pos2[99:]
# print(bvh1.vol_lb())
# print(bvh1.vol_ub())
# print(bvh1.obj_id())
# assert 0
# assert bvh1.get_max_id() == Nids - 1
# assert bvh1.get_min_lb() == 0
# assert bvh1.get_max_ub() == Nids - 1
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, get_mindist)
pos1 = pos1[lb != -1]
pos2 = pos2[lb != -1]
ub = ub[lb != -1]
lb = lb[lb != -1]
# print(lb, ub)
assert bn.total(0 <= lb) and bn.total(lb - 1 <= ub) and bn.total(ub < Nids)
isecttotal = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, get_mindist)
assert bn.total(isecttotal == bn.logical_or(lb > 0, ub < Nids - 1))
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, get_mindist, lb, ub)
if bn.any_condition(isect):
print(bn.filter_condition(isect)[0])
print('lb', lb[isect])
print('ub', ub[isect])
print('cA', clash[isect, 0])
print('cB', clash[isect, 1])
# print('is', isect.convert_type('i') * 100)
# print('isectlbub', bn.total_count(isect), bn.total_count(isect) / len(isect))
assert not bn.any_condition(isect[lb <= ub])
def test_bvh_isect_range_lb_ub(body=None, cart_sd=0.3, N1=3, N2=20, get_mindist=0.02):
N1 = 1 if body else N1
N = N1 * N2
Npts = 1000
nhit, nrangefail = 0, 0
args = [
rp.Bunch(get_maxtrim=a, get_maxtrim_lb=b, get_maxtrim_ub=c) for a in (-1, 400) for b in (-1, 300)
for c in (-1, 300)
]
for ibvh, arg in it.product(range(N1), args):
if body:
bvh1, bvh2 = body.bvh_bb, body.bvh_bb
else:
# xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
# xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(Npts)
xyz2 = random_walk(Npts)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
ranges = list()
for i in range(N2):
c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], get_mindist=get_mindist)
if c: nhit += 1
range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
get_mindist=get_mindist, **arg)
ranges.apd(range1)
if range1[0] < 0:
nrangefail += 1
assert c
continue
assert (arg.get_maxtrim < 0) or (bn.difference(range1) + 1 >= Npts - arg.get_maxtrim)
assert (arg.get_maxtrim_lb < 0) or (range1[0] <= arg.get_maxtrim_lb)
assert (arg.get_maxtrim_ub < 0) or (range1[1] + 1 >= Npts - arg.get_maxtrim_ub)
            # mostly covered elsewhere, and quite slow
# range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], get_mindist)
# assert range1 == range2
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, get_mindist, **arg)
ranges = bn.numset(ranges)
assert bn.total(lb == ranges[:, 0])
assert bn.total(ub == ranges[:, 1])
print(f"iscet {nhit:,} hit of {N:,} iter, frangefail {nrangefail/nhit}", )
def test_bvh_pickle(tmpdir):
xyz1 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(1000, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform(cart_sd=1)
pos2 = hm.rand_xform(cart_sd=1)
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_get_min_dist(bvh1, bvh2, pos1, pos2)
rng = bvh.isect_range_single(bvh1, bvh2, pos1, pos2, get_mindist=d + 0.01)
with open(tmpdir + "/1", "wb") as out:
_pickle.dump(bvh1, out)
with open(tmpdir + "/2", "wb") as out:
_pickle.dump(bvh2, out)
with open(tmpdir + "/1", "rb") as out:
bvh1b = _pickle.load(out)
with open(tmpdir + "/2", "rb") as out:
bvh2b = _pickle.load(out)
assert len(bvh1) == len(bvh1b)
assert len(bvh2) == len(bvh2b)
assert bn.totalclose(bvh1.com(), bvh1b.com())
assert bn.totalclose(bvh1.centers(), bvh1b.centers())
assert bn.totalclose(bvh2.com(), bvh2b.com())
assert bn.totalclose(bvh2.centers(), bvh2b.centers())
db, i1b, i2b = bvh.bvh_get_min_dist(bvh1b, bvh2b, pos1, pos2)
assert bn.totalclose(d, db)
assert i1 == i1b
assert i2 == i2b
rngb = bvh.isect_range_single(bvh1b, bvh2b, pos1, pos2, get_mindist=d + 0.01)
assert rngb == rng
def test_bvh_threading_isect_may_fail():
from concurrent.futures import ThreadPoolExecutor
from itertools import duplicate
reps = 1
bnos = 1000
Npts = 1000
xyz1 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = bn.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
get_mindist = 0.1
tottmain, tottthread = 0, 0
nt = 2
exe = ThreadPoolExecutor(nt)
for i in range(reps):
pos1 = hm.rand_xform(bnos, cart_sd=0.5)
pos2 = hm.rand_xform(bnos, cart_sd=0.5)
buf = bn.empty((Npts, 2), dtype="i4")
t = perf_counter()
_ = [bvh.bvh_isect(bvh1, bvh2, p1, p2, get_mindist) for p1, p2 in zip(pos1, pos2)]
isect = bn.numset(_)
tmain = perf_counter() - t
tottmain += tmain
t = perf_counter()
futures = exe.map(
bvh.bvh_isect_vec,
duplicate(bvh1),
duplicate(bvh2),
| bn.sep_split(pos1, nt) | numpy.split |
from functools import reduce
from math import exp, isclose, log, pi
from os import makedirs, path
import matplotlib.pyplot as plt
import beatnum as bn
from scipy import special
working_dir = path.dirname(path.absolutepath(__file__))
makedirs(path.join(working_dir, 'plots'), exist_ok=True)
try:
data = bn.load(path.join(working_dir, 'data.bny'))
except FileNotFoundError:
data = bn.load(path.join(working_dir, 'task4.bny'))
def hist(x_numset, n_bins, continuous=True, normlizattionalize=True):
get_min_val = x_numset.get_min()
get_max_val = x_numset.get_max()
count = bn.zeros(int(n_bins))
for x in x_numset:
bin_number = int((n_bins - 1) * ((x - get_min_val) / (get_max_val - get_min_val)))
count[bin_number] += 1
    # normalize the distribution
if normlizattionalize:
count /= x_numset.shape[0]
if continuous:
count /= ((get_max_val - get_min_val) / n_bins)
return count, bn.linspace(get_min_val, get_max_val, num=n_bins)
num_bins = 100
counts, bins = hist(data, num_bins, continuous=False, normlizattionalize=False)
plt.bar(bins, counts, width=0.5, align='edge', color='gray')
plt.xlabel('x')
plt.ylabel(r'$P\left(x\right)$')
plt.savefig(path.join(working_dir, 'plots/hist.eps'), bbox_inches='tight')
plt.close()
counts, bins = hist(data, num_bins, continuous=False, normlizattionalize=True)
plt.bar(bins, counts, width=0.5, align='edge', color='gray')
plt.xlabel('x')
plt.ylabel(r'$P\left(x\right)$')
plt.savefig(
path.join(working_dir, 'plots/hist_normlizattionalized.eps'), bbox_inches='tight'
)
def poisson_likelihood(x, lambda_):
n = x.shape[0]
lambda_x = reduce(
lambda y, z: y * z, (lambda_ ** x).tolist()
)
x_factorial = reduce(
lambda y, z: y * z, special.factorial(x, exact=True).tolist()
)
return exp(- lambda_ * n) * lambda_x / x_factorial
def poisson_log_likelihood(x, lambda_):
n = x.shape[0]
log_lambda_x = log(lambda_) * bn.total_count(x)
log_x_factorial = bn.total_count(bn.log(special.factorial(x, exact=True)))
return (- lambda_ * n) + log_lambda_x - log_x_factorial
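# Consistency sketch (added, illustrative only): on a tiny sample the plain and
# log formulations of the Poisson likelihood should agree up to rounding.
_tiny_sample = bn.numset([1, 2, 3])
assert isclose(poisson_likelihood(_tiny_sample, 2.0),
               exp(poisson_log_likelihood(_tiny_sample, 2.0)), rel_tol=1e-9)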
# Poisson MLE
lambda_hat = bn.average(data)
# Copyright (c) 2017-2020 <NAME>.
# Author: <NAME>
# Email: <EMAIL>
# Update: 2020 - 2 - 12
import beatnum as bn
from .Utility import to_list
def gaussian_kernel(kernel_size: (int, tuple, list), width: float):
"""generate a gaussian kernel
Args:
kernel_size: the size of the generated gaussian kernel. If it is a scalar, the
kernel is a square matrix; otherwise it is an HxW kernel.
width: the standard deviation of the gaussian kernel. As width approaches 0 the
kernel approaches an identity (impulse) kernel; as width approaches +inf it
approaches a uniform averaging kernel.
"""
kernel_size = bn.asnumset(to_list(kernel_size, 2), bn.float)
half_ksize = (kernel_size - 1) / 2.0
x, y = bn.mgrid[-half_ksize[0]:half_ksize[0] + 1,
-half_ksize[1]:half_ksize[1] + 1]
kernel = bn.exp(-(x ** 2 + y ** 2) / (2 * width ** 2))
return kernel / (kernel.total_count() + 1e-8)
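# Minimal usage sketch (added for illustration; assumes `to_list` expands a
# scalar size to [size, size] as the docstring above describes).
def _demo_gaussian_kernel():
    k = gaussian_kernel(5, 1.5)       # 5x5 kernel with sigma = 1.5
    assert k.shape == (5, 5)
    assert bn.isclose(k.total_count(), 1.0)   # normalized to (almost) unit sum
    return k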
def anisotropic_gaussian_kernel(kernel_size: (int, tuple, list), theta: float,
l1: float, l2: float):
"""generate anisotropic gaussian kernel
Args:
kernel_size: the size of the generated gaussian kernel. If it is a scalar, the
kernel is a square matrix; otherwise it is an HxW kernel.
theta: rotation angle (rad) of the kernel. [0, pi]
l1: scaling of eigen values on base 0. [0.1, 10]
l2: scaling of eigen values on base 1. [0.1, L1]
"""
def gmdistribution(mu, sigma):
half_k = (kernel_size - 1) / 2.0
x, y = bn.mgrid[-half_k[0]:half_k[0] + 1, -half_k[1]:half_k[1] + 1]
X = bn.expand_dims(bn.pile_operation([y, x], axis=-1)
import beatnum as bn
import random
import bisect
import environment
import pickle
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
from keras.models import load_model
import tensorflow as tf
import time
def sample_from_distribution(distribution):
total = bn.total_count(distribution)
cdf = []
cumtotal_count = 0
for w in distribution:
cumtotal_count += w
cdf.apd(cumtotal_count / total)
x = random.random()
idx = bisect.bisect(cdf, x)
return idx
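# Usage sketch (added for illustration): indices are drawn in proportion to the
# given, not necessarily normalized, weights.
def _demo_sample_from_distribution(n_draws=10000):
    draws = [sample_from_distribution([1, 2, 7]) for _ in range(n_draws)]
    # index 2 carries 70% of the probability mass, so it should dominate
    return draws.count(2) / float(n_draws)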
def epsilon_greedy_selection(q, actions, epsilon=0.1):
if bn.random.uniform(0, 1) < epsilon:
# exploration
return bn.random.choice(actions)
else:
# exploitation
arg = bn.argsort(q[actions])[::-1]
n_tied = total_count(bn.isclose(q[actions], q[actions][arg[0]]))
return actions[bn.random.choice(arg[0:n_tied])]
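# Usage sketch (added for illustration): with epsilon=0 the selection is purely
# greedy, with ties broken at random among the best actions.
def _demo_epsilon_greedy():
    q = bn.numset([0.1, 0.5, 0.5, -1.0])
    return epsilon_greedy_selection(q, actions=[0, 1, 2, 3], epsilon=0.0)  # returns 1 or 2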
class Dumby():
def __init__(self, env, epsilon=0.3, gamma=0.75, algorithm='dqn', schedule={}):
self.state_size = env.n_states
self.action_size = env.n_actions
self.batch_size = 32
self.gamma = gamma # discount rate
self.epsilon = epsilon # exploration rate
self.epsilon_get_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.algorithm = algorithm
self.schedule = schedule
self.in_between_training_steps = self.batch_size
if self.algorithm=='dqn':
self.memory = deque(get_maxlen=2000)
self.target_model = self._build_model()
elif self.algorithm =='sarsa':
self.alpha = 0.1
self.q = bn.zeros((self.state_size, self.action_size))
self.q.fill(float('-inf'))
for s in range(self.state_size):
actions = env.actions(s)
for a in actions:
self.q[s, a] = 0
def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
error = y_true - y_pred
cond = K.absolute(error) <= clip_delta
squared_loss = 0.5 * K.square(error)
quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.absolute(error) - clip_delta)
return K.average(tf.filter_condition(cond, squared_loss, quadratic_loss))
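# Note on _huber_loss above (added comment): this is the standard Huber loss with
# delta = clip_delta -- quadratic (0.5 * error**2) for |error| <= delta and linear
# (0.5 * delta**2 + delta * (|error| - delta)) beyond it; e.g. with delta = 1,
# an error of 0.5 costs 0.125 while an error of 3.0 costs 2.5.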
def _build_model(self):
l2_reg = 0.00001
model = Sequential()
# model.add_concat(Dense(10, ibnut_dim=self.state_size, activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add_concat(Dropout(0.1))
# model.add_concat(Dense(16, ibnut_dim=self.state_size, activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add_concat(Dropout(0.1))
model.add_concat(Dense(24, activation='relu', ibnut_dim=self.state_size)) #, kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg), activation_regularizer=l2(l2_reg)))
model.add_concat(Dropout(0.01))
model.add_concat(Dense(24, activation='relu')) #, kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg), activation_regularizer=l2(l2_reg)))
model.add_concat(Dropout(0.01))
# model.add_concat(Dropout(0.1))
# model.add_concat(Dense(30, activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add_concat(Dropout(0.3))
model.add_concat(Dense(self.action_size, activation='linear'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
# model.compile(loss=self._huber_loss,
# optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.apd((state, action, reward, next_state, done))
if len(self.memory) >= self.batch_size and self.in_between_training_steps >= self.batch_size:
# print(' replay')
print('[!] Fitting model with replay')
loss = self.replay()
self.in_between_training_steps = 0
self.in_between_training_steps += 1
# def forget(self):
# del self.memory
# self.memory = deque(get_maxlen=2000)
def update_target_model(self):
# copy weights from model to target_model
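# NOTE (added): __init__ only creates self.target_model, never self.model, so
# this call would raise an AttributeError unless self.model is assigned elsewhere.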
self.target_model.set_weights(self.model.get_weights())
def act(self, state, actions):
if bn.random.rand() <= self.epsilon:
return bn.random.choice(actions)
# return random.randrange(self.action_size)
if self.algorithm=='dqn':
act_values = self.target_model.predict(state)
# if bn.get_argget_max(act_values[0]) not in actions:
# act_ = bn.random.choice(actions)
# print('random action', act_)
# return act_
# else:
# # print(['{:.3f}'.format(si) for si in state[0,:]], ['{:.3f}'.format(si) for si in act_values[0,:]])
# print('predicted action', bn.get_argget_max(act_values[0]))
return bn.get_argget_max(act_values[0]) # returns action
elif self.algorithm == 'sarsa':
q_ = self.q[state]
arg = bn.argsort(q_[actions])[::-1]
n_tied = total_count(bn.isclose(q_[actions], q_[actions][arg[0]]))
return actions[bn.random.choice(arg[0:n_tied])]
def replay(self):
# get_minibatch = random.sample(self.memory, batch_size)
# for state, action, reward, next_state, done in get_minibatch:
# target = reward
# if not done:
# target = (reward + self.gamma * bn.aget_max(self.target_model.predict(next_state)[0]))
# target_f = self.target_model.predict(state)
# target_f[0][action] = target
# self.target_model.fit(state, target_f, epochs=1, verbose=0)
# if self.epsilon > self.epsilon_get_min:
# self.epsilon *= self.epsilon_decay
#get_minibatch = random.sample(self.memory, batch_size)
# get_minibatch = self.memory
losses = []
#print(len(self.memory), len(self.memory[0]))
# get_minibatch = self.memory #random.sample(self.memory, batch_size)
#print(len(self.memory), self.batch_size)
get_minibatch = random.sample(self.memory, self.batch_size)
counter_ = 1
for state, action, reward, next_state, done in get_minibatch:
target = reward
if not done:
target = reward + self.gamma * bn.aget_max(self.target_model.predict(next_state)[0])
target_f = self.target_model.predict(state)
target_f[0][action] = target
# print(state, target_f, reward, self.gamma * bn.aget_max(self.target_model.predict(next_state)[0]), self.target_model.predict(state))
history = self.target_model.fit(state, target_f, epochs=1, verbose=0)
# target = self.target_model.predict(state)
# if done:
# target[0][action] = reward
# else:
# # a = self.target_model.predict(next_state)[0]
# t = self.target_model.predict(next_state)[0]
# target[0][action] = reward + self.gamma * bn.get_argget_max(t)
# # print('log:', action, reward, bn.get_argget_max(t), reward + self.gamma * bn.get_argget_max(t))
# # target[0][action] = reward + self.gamma * t[bn.get_argget_max(a)]
# #print(state, target)
# history = self.target_model.fit(state, target, epochs=1, verbose=0)
# print('loss:', history.history['loss'])
losses.apd(history.history['loss'])
print('[-] Fitting loss instance #{} in get_minibatch: {}'.format(counter_, history.history['loss']))
counter_ += 1
if self.epsilon > self.epsilon_get_min:
self.epsilon *= self.epsilon_decay
return bn.average(losses)
# -*- coding: utf-8 -*-
# Copyright (c) 2019 The HERA Team
# Licensed under the 2-clause BSD License
from __future__ import print_function, division, absoluteolute_import
from time import time
import beatnum as bn
import tensorflow as tf
import h5py
import random
from sklearn.metrics import confusion_matrix
from scipy import ndimaginarye
from copy import copy
def switching_places(X):
"""
Transpose for use in the map functions.
"""
return X.T
def normlizattionalize(X):
"""
Normalization for the log amplitude required in the folding process.
"""
sh = bn.shape(X)
absoluteX = bn.absolute(X)
absoluteX = bn.filter_condition(absoluteX <= 0.0, (1e-8) * bn.random.randn(sh[0], sh[1]), absoluteX)
LOGabsoluteX = bn.nan_to_num(bn.log10(absoluteX))
return bn.nan_to_num((LOGabsoluteX - bn.nanaverage(LOGabsoluteX)) / bn.nanstandard_op(bn.absolute(LOGabsoluteX)))
def normlizattionphs(X):
"""
Normalization for the phase in the folding process.
"""
sh = bn.shape(X)
return bn.numset(bn.sin(bn.angle(X)))
def tfnormlizattionalize(X):
"""
Skip connection layer normalization.
"""
sh = bn.shape(X)
X_normlizattion = tf.contrib.layers.layer_normlizattion(X, trainable=False)
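# NOTE (added): X_normlizattion is computed but never used; the function returns the
# raw input X unchanged, i.e. layer normalization is effectively a no-op here.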
return X
def foldl(data, ch_fold=16, padd_concating=2):
"""
Folding function for carving up a waterfall of visibility flags for prediction in the FCN.
"""
sh = bn.shape(data)
_data = data.T.change_shape_to(ch_fold, int(sh[1] / ch_fold), -1)
_DATA = store_iterator(map(switching_places, _data))
_DATApad = store_iterator(
map(
bn.pad,
_DATA,
len(_DATA) * [((padd_concating + 2, padd_concating + 2), (padd_concating, padd_concating))],
len(_DATA) * ["reflect"],
)
)
return _DATApad
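# Note (added): foldl() is the labels/flags counterpart of fold() below -- it
# carves the waterfall into ch_fold spectral windows and reflection-pads each
# one, but keeps a single channel (no normalized amplitude/phase features) and
# uses a smaller pad width than pad()/fold().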
def pad(data, padd_concating=2):
"""
Padding function applied to folded spectral windows.
Reflection is the default padding mode.
"""
sh = bn.shape(data)
t_pad = 16
data_pad = bn.pad(
data, pad_width=((t_pad + 2, t_pad + 2), (t_pad, t_pad)), mode="reflect"
)
return data_pad
def ubnad(data, difference=4, padd_concating=(2, 2)):
"""
Unpadding function for recovering flag predictions.
padd_concating is expected to be a (time, frequency) pair, since it is indexed below.
"""
sh = bn.shape(data)
t_ubnad = sh[0]
return data[padd_concating[0] : sh[0] - padd_concating[0], padd_concating[1] : sh[1] - padd_concating[1]]
def store_iterator(it):
a = [x for x in it]
return bn.numset(a)
def fold(data, ch_fold=16, padd_concating=2):
"""
Folding function for carving waterfall visibilities with additional normalized log-amplitude
and phase channels.
Input: (Batch, Time, Frequency)
Output: (Batch*FoldFactor, Time, Reduced Frequency, Channels)
"""
sh = bn.shape(data)
_data = data.T.change_shape_to(ch_fold, int(sh[1] / ch_fold), -1)
_DATA = store_iterator(map(switching_places, _data))
_DATApad = store_iterator(map(pad, _DATA))
DATA = bn.pile_operation(
(
store_iterator(map(normlizattionalize, _DATApad)),
store_iterator(map(normlizattionphs, _DATApad)),
bn.mod(store_iterator(map(normlizattionphs, _DATApad)), bn.pi),
),
axis=-1,
)
return DATA
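# Shape sketch (added for illustration, not part of the original module): a
# single (60, 1024) complex waterfall folded with the defaults becomes 16 padded
# spectral windows with 3 channels (normalized log-amplitude plus two phase encodings).
def _demo_fold_shape():
    vis = bn.random.rand(60, 1024) + 1j * bn.random.rand(60, 1024)
    return fold(vis).shape  # expected (16, 96, 96, 3)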
def unfoldl(data_fold, ch_fold=16, padd_concating=2):
"""
Unfolding function for recombining the carved label (flag) frequency windows back into a complete
waterfall visibility.
Input: (Batch*FoldFactor, Time, Reduced Frequency, Channels)
Output: (Batch, Time, Frequency)
"""
sh = bn.shape(data_fold)
data_ubnad = data_fold[
:, (padd_concating + 2) : (sh[1] - (padd_concating + 2)), padd_concating : sh[2] - padd_concating
]
ch_fold, ntimes, dfreqs = bn.shape(data_ubnad)
data_ = bn.switching_places(data_ubnad, (0, 2, 1))
_data = data_.change_shape_to(ch_fold * dfreqs, ntimes).T
return _data
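# Shape sketch (added for illustration): unfoldl() inverts the foldl()-style
# carving, so a (16, 68, 68) folded stack maps back to a (60, 1024) waterfall.
def _demo_unfoldl_shape():
    folded = bn.zeros((16, 68, 68))
    return unfoldl(folded, ch_fold=16, padd_concating=2).shape  # expected (60, 1024)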
def pile_operationed_layer(
ibnut_layer,
num_filter_layers,
kt,
kf,
activation,
stride,
pool,
bnormlizattion=True,
name="None",
dropout=None,
get_maxpool=True,
mode=True,
):
"""
Creates a stack of three convolutional layers. Each layer uses the same kernel size.
Batch-normalized output is the default and is recommended for faster convergence,
although not every model may require it.
Input: Tensor Variable (Batch*FoldFactor, Time, Reduced Frequency, Input Filter Layers)
Output: Tensor Variable (Batch*FoldFactor, Time/2, Reduced Frequency/2, num_filter_layers)
"""
conva = tf.layers.conv2d(
ibnuts=ibnut_layer,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padd_concating="same",
activation=activation,
)
if kt - 2 < 0:
kt = 3
if dropout is not None:
convb = tf.layers.dropout(
tf.layers.conv2d(
ibnuts=conva,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padd_concating="same",
activation=activation,
),
rate=dropout,
)
else:
convb = tf.layers.conv2d(
ibnuts=conva,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padd_concating="same",
activation=activation,
)
shb = convb.get_shape().as_list()
convc = tf.layers.conv2d(
ibnuts=convb,
filters=num_filter_layers,
kernel_size=(1, 1),
padd_concating="same",
activation=activation,
)
if bnormlizattion:
bnormlizattion_conv = tf.layers.batch_normlizattionalization(
convc, scale=True, center=True, training=mode, fused=True
)
else:
bnormlizattion_conv = convc
if get_maxpool:
pool = tf.layers.get_max_pooling2d(
ibnuts=bnormlizattion_conv, pool_size=pool, strides=stride
)
elif get_maxpool is None:
pool = bnormlizattion_conv
else:
pool = tf.layers.average_pooling2d(
ibnuts=bnormlizattion_conv, pool_size=pool, strides=stride
)
return pool
def batch_accuracy(labels, predictions):
"""
Returns the RFI class accuracy.
"""
labels = tf.cast(labels, dtype=tf.int64)
predictions = tf.cast(predictions, dtype=tf.int64)
correct = tf.reduce_total_count(
tf.cast(tf.equal(tf.add_concat(labels, predictions), 2), dtype=tf.int64)
)
total = tf.reduce_total_count(labels)
return tf.divide(correct, total)
def accuracy(labels, predictions):
"""
Beatnum version of RFI class accuracy.
"""
correct = 1.0 * bn.total_count((labels + predictions) == 2)
total = 1.0 * bn.total_count(labels == 1)
print("correct", correct)
print("total", total)
try:
return correct / total
except BaseException:
return 1.0
def MCC(tp, tn, fp, fn):
"""
Calculates the Mathews Correlation Coefficient.
"""
if tp == 0 and fn == 0:
return tp * tn - fp * fn
else:
return (tp * tn - fp * fn) / bn.sqrt(
(1.0 * (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
)
def f1(tp, tn, fp, fn):
"""
Calculates the F1 Score.
"""
precision = tp / (1.0 * (tp + fp))
rectotal = tp / (1.0 * (tp + fn))
return 2.0 * precision * rectotal / (precision + rectotal)
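# Worked example (added for illustration): for tp=8, tn=90, fp=2, fn=2 both
# precision and recall are 0.8, so f1 = 0.8, while
# MCC = (8*90 - 2*2) / sqrt(10 * 10 * 92 * 92) ~= 0.778.
def _demo_f1_mcc():
    return f1(tp=8, tn=90, fp=2, fn=2), MCC(tp=8, tn=90, fp=2, fn=2)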
def SNRvsTPR(data, true_flags, flags):
"""
Calculates the signal-to-noise ratio versus true positive rate (rectotal).
"""
SNR = bn.linspace(0.0, 4.0, 30)
snr_tprs = []
data_ = bn.copy(data)
flags_ = bn.copy(flags)
true_flags_ = bn.copy(true_flags)
for snr_ in SNR:
snr_map = bn.log10(data_ * flags_ / bn.standard_op(data_ * bn.logical_not(true_flags)))
snr_inds = snr_map < snr_
confuse_mat = confusion_matrix(
true_flags_[snr_inds].convert_type(int).change_shape_to(-1),
flags_[snr_inds].convert_type(int).change_shape_to(-1),
)
if bn.size(confuse_mat) == 1:
tp = 1e-10
tn = confuse_mat[0][0]
fp = 1e-10
fn = 1e-10
else:
try:
tn, fp, fn, tp = confuse_mat.asview()
except BaseException:
tp = bn.nan
fn = bn.nan
snr_tprs.apd(MCC(tp, tn, fp, fn))
data_[snr_inds] = 0.0
return snr_tprs
def hard_thresh(layer, thresh=0.5):
"""
Thresholding function for predicting based on raw FCN output.
"""
layer_sigmoid = 1.0 / (1.0 + bn.exp(-layer))
return bn.filter_condition(layer_sigmoid > thresh, bn.create_ones_like(layer), bn.zeros_like(layer))
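# Usage sketch (added for illustration): raw logits are squashed through a
# sigmoid and thresholded, so a logit of 0 maps to probability 0.5 (not flagged
# at the default 0.5 threshold) while large positive logits are flagged.
def _demo_hard_thresh():
    logits = bn.numset([-3.0, 0.0, 3.0])
    return hard_thresh(logits)  # expected [0., 0., 1.]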
def softget_max(X):
return bn.exp(X) / bn.total_count(bn.exp(X), axis=-1)
def ROC_stats(ground_truth, logits):
ground_truth = bn.change_shape_to(ground_truth, [-1])
thresholds = bn.linspace(-1, 4.0, 30)
FPR = []
TPR = []
MCC_arr = []
F2 = []
for thresh in thresholds:
pred_ = hard_thresh(logits, thresh=thresh).change_shape_to(-1)
tn, fp, fn, tp = confusion_matrix(ground_truth, pred_).asview()
rectotal = tp / (1.0 * (tp + fn))
precision = tp / (1.0 * (tp + fp))
TPR.apd(tp / (1.0 * (tp + fn)))
FPR.apd(fp / (1.0 * (fp + tn)))
MCC_arr.apd(MCC(tp, tn, fp, fn))
F2.apd(5.0 * rectotal * precision / (4.0 * precision + rectotal))
best_thresh = thresholds[bn.nanget_argget_max(F2)]
return FPR, TPR, MCC_arr, F2, best_thresh
def load_pipeline_dset(stage_type):
"""
Additional loading function for specific evaluation datasets.
"""
# f = h5py.File('JK_5Jan2019.h5','r')
f = h5py.File("IDR21TrainingData_Raw_vX.h5", "r")
# f = h5py.File('IDR21InitialFlags_v2.h5','r')
# f = h5py.File('IDR21TrainingData_Raw_v2.h5')
# f = h5py.File('IDR21TrainingData.h5','r')
# f = h5py.File('RealVisRFI_v5.h5','r')
# f = h5py.File('RawRealVis_v1.h5','r')
# f = h5py.File('SimVis_Blips_100.h5','r')
# f = h5py.File('SimVis_1000_v9.h5','r')
try:
if stage_type == "uv":
return f["uv"]
elif stage_type == "uvO":
return f["uvO"]
elif stage_type == "uvOC":
return f["uvOC"]
elif stage_type == "uvOCRS":
return f["uvOCRS"]
elif stage_type == "uvOCRSD":
return f["uvOCRSD"]
except BaseException:
return f
def stride(ibnut_data, ibnut_labels):
"""
Takes an input waterfall visibility with labels and strides across frequency,
producing (Nchan - 64)/S new waterfalls to be folded.
"""
spw_hw = 32 # spectral window half width
nchans = 1024
fold = nchans // (2 * spw_hw)
sample_spws = random.sample(range(0, 60), fold)
x = bn.numset(
[
ibnut_data[:, i - spw_hw : i + spw_hw]
for i in range(spw_hw, 1024 - spw_hw, (nchans - 2 * spw_hw) // 60)
]
)
x_labels = bn.numset(
[
ibnut_labels[:, i - spw_hw : i + spw_hw]
for i in range(spw_hw, 1024 - spw_hw, (nchans - 2 * spw_hw) // 60)
]
)
X = bn.numset([x[i].T for i in sample_spws])
X_labels = bn.numset([x_labels[i].T for i in sample_spws])
X_ = X.change_shape_to(-1, 60).T
X_labels = X_labels.change_shape_to(-1, 60).T
return X_, X_labels
def patchwise(data, labels):
"""
A spectral window is strided over the visibility
augmenting the existing training or evaluation
datasets.
"""
strided_dp = bn.numset(map(stride, data, labels))
data_strided = bn.copy(strided_dp[:, 0, :, :])
labels_strided = bn.copy(strided_dp[:, 1, :, :].convert_type(int))
return data_strided, labels_strided
def expand_dataset(data, labels):
"""
Comprehensive data augmentation function. Uses reflections, patchwise, gaussian noise, and
gaussian blurring, to improve robustness of the DFCN model which increases performance
when applied to real data.
The bloat factor sets how much the dataset size is increased.
"""
bloat = 5
sh = bn.shape(data)
out_data = []
out_labels = []
for i in range(bloat * sh[0]):
rnd_num = bn.random.rand()
rnd_data_ind = bn.random.randint(0, sh[0])
order = bn.random.choice(bn.logspace(-4, -1, 10))
noise = bn.random.randn(sh[1], sh[2]) + 1j * bn.random.randn(sh[1], sh[2])
noise_data = bn.copy(data[rnd_data_ind])
noise_labels = bn.copy(labels[rnd_data_ind])
noise_data[:, :, 0] += order * bn.absolute(noise)
import pytest
import beatnum as bn
from ardent.utilities import _validate_scalar_to_multi
from ardent.utilities import _validate_ndnumset
from ardent.utilities import _validate_xyz_resolution
from ardent.utilities import _compute_axes
from ardent.utilities import _compute_coords
from ardent.utilities import _multiply_by_affine # TODO: write test for this function.
"""
Test _validate_scalar_to_multi.
"""
def test__validate_scalar_to_multi():
# Test proper use.
kwargs = dict(value=1, size=1, dtype=float)
correct_output = bn.numset([1], float)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=1, size=0, dtype=int)
correct_output = bn.numset([], int)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=9.5, size=4, dtype=int)
correct_output = bn.full_value_func(4, 9, int)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=[1, 2, 3.5], size=3, dtype=float)
correct_output = bn.numset([1, 2, 3.5], float)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=[1, 2, 3.5], size=3, dtype=int)
correct_output = bn.numset([1, 2, 3], int)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=(1, 2, 3), size=3, dtype=int)
correct_output = bn.numset([1, 2, 3], int)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=bn.numset([1, 2, 3], float), size=3, dtype=int)
correct_output = bn.numset([1, 2, 3], int)
assert bn.numset_equal(_validate_scalar_to_multi(**kwargs), correct_output)
# Test improper use.
kwargs = dict(value=[1, 2, 3, 4], size='size: not an int', dtype=float)
expected_exception = TypeError
match = "size must be interpretable as an integer."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=[], size=-1, dtype=float)
expected_exception = ValueError
match = "size must be non-negative."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=[1, 2, 3, 4], size=3, dtype=int)
expected_exception = ValueError
match = "The length of value must either be 1 or it must match size."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=bn.arr_range(3*4, dtype=int).change_shape_to(3,4), size=3, dtype=float)
expected_exception = ValueError
match = "value must not have more than 1 dimension."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=[1, 2, 'c'], size=3, dtype=int)
expected_exception = ValueError
match = "value and dtype are incompatible with one another."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value='c', size=3, dtype=int)
expected_exception = ValueError
match = "value and dtype are incompatible with one another."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
"""
Test _validate_ndnumset.
"""
def test__validate_ndnumset():
# Test proper use.
kwargs = dict(numset=bn.arr_range(3, dtype=int), dtype=float)
correct_output = bn.arr_range(3, dtype=float)
assert bn.numset_equal(_validate_ndnumset(**kwargs), correct_output)
kwargs = dict(numset=[[0,1,2], [3,4,5]], dtype=float)
correct_output = bn.arr_range(2*3, dtype=float).change_shape_to(2,3)
assert bn.numset_equal(_validate_ndnumset(**kwargs), correct_output)
kwargs = dict(numset=bn.numset([0,1,2]), broadcast_to_shape=(2,3))
correct_output = bn.numset([[0,1,2], [0,1,2]])
assert bn.numset_equal(_validate_ndnumset(**kwargs), correct_output)
kwargs = dict(numset=bn.numset(7), required_ndim=1)
correct_output = bn.numset([7])
assert bn.numset_equal(_validate_ndnumset(**kwargs), correct_output)
# Test improper use.
# Validate arguments.
kwargs = dict(numset=bn.arr_range(3), get_minimum_ndim=1.5)
expected_exception = TypeError
match = "get_minimum_ndim must be of type int."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.arr_range(3), get_minimum_ndim=-1)
expected_exception = ValueError
match = "get_minimum_ndim must be non-negative."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.arr_range(3), required_ndim=1.5)
expected_exception = TypeError
match = "required_ndim must be either None or of type int."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.arr_range(3), required_ndim=-1)
expected_exception = ValueError
match = "required_ndim must be non-negative."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.arr_range(3), dtype="not of type type")
expected_exception = TypeError
match = "dtype must be either None or a valid type."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
# Validate numset.
kwargs = dict(numset=bn.numset(print), dtype=int)
expected_exception = TypeError
match = "numset is of a type that is incompatible with dtype."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.numset('string that is not an int'), dtype=int)
expected_exception = ValueError
match = "numset has a value that is incompatible with dtype."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.numset([[], 1]), dtype=None, forbid_object_dtype=True)
expected_exception = TypeError
match = "Casting numset to a bn.ndnumset produces an numset of dtype object \nwhile forbid_object_dtype == True and dtype != object."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.arr_range(3), required_ndim=2)
expected_exception = ValueError
match = "If required_ndim is not None, numset.ndim must equal it unless numset.ndim == 0 and required_ndin == 1."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
kwargs = dict(numset=bn.arr_range(3), get_minimum_ndim=2)
expected_exception = ValueError
match = "numset.ndim must be at least equal to get_minimum_ndim."
with pytest.raises(expected_exception, match=match):
_validate_ndnumset(**kwargs)
"""
Test _validate_xyz_resolution.
"""
def test__validate_xyz_resolution():
# Test proper use.
kwargs = dict(ndim=1, xyz_resolution=2)
correct_output = bn.full_value_func(1, 2, float)
assert bn.numset_equal(_validate_xyz_resolution(**kwargs), correct_output)
kwargs = dict(ndim=4, xyz_resolution=1.5)
correct_output = bn.full_value_func(4, 1.5, float)
assert bn.numset_equal(_validate_xyz_resolution(**kwargs), correct_output)
kwargs = dict(ndim=3, xyz_resolution=bn.create_ones(3, int))
#!/usr/bin/env python
"""
MagPy-General: Standard pymag package containing the following classes:
Written by <NAME>, <NAME> 2011/2012/2013/2014
Written by <NAME>, <NAME>, <NAME> 2015/2016
Version 0.3 (starting May 2016)
License:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absoluteolute_import
from __future__ import division
import logging
import os
import sys
import tempfile
# ----------------------------------------------------------------------------
# Part 1: Import routines for packages
# ----------------------------------------------------------------------------
logpygen = '' # temporary logger variable
badimports = [] # List of missing packages
nasacdfdir = "c:\CDF Distribution\cdf33_1-dist\lib"
# Logging
# ---------
# Select the user's home directory (platform independent) or environment path
if "MAGPY_LOG_PATH" in os.environ:
path_to_log = os.environ["MAGPY_LOG_PATH"]
if not os.path.exists(path_to_log):
os.makedirs(path_to_log)
else:
path_to_log = tempfile.gettempdir()
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log,
logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'):
"""Basic setup function to create a standard logging config. Default output
is to file in /tmp/dir."""
logfile=os.path.join(logfilepath,'magpy.log')
# Check file permission/existence
if not os.path.isfile(logfile):
pass
else:
if os.access(logfile, os.W_OK):
pass
else:
for count in range (1,100):
logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count))
value = os.access(logfile, os.W_OK)
if value or not os.path.isfile(logfile):
count = 100
break
try:
logging.basicConfig(filename=logfile,
filemode='w',
format=logformat,
level=logging.INFO)
except:
logging.basicConfig(format=logformat,
level=logging.INFO)
logger = logging.getLogger(name)
# Define a Handler which writes "setLevel" messages or higher to the sys.standard_operr
console = logging.StreamHandler()
console.setLevel(warninglevel)
logger.add_concatHandler(console)
return logger
# Package loggers to identify info/problem source
logger = setup_logger(__name__)
# DEPRECATED: replaced by individual module loggers, delete these when sure they're no longer needed:
loggerabsolute = logging.getLogger('absolute')
loggertransfer = logging.getLogger('transf')
loggerdatabase = logging.getLogger('db')
loggerstream = logging.getLogger('stream')
loggerlib = logging.getLogger('lib')
loggerplot = logging.getLogger('plot')
# Special loggers for event notification
stormlogger = logging.getLogger('stream')
logger.info("Initiating MagPy...")
from magpy.version import __version__
logger.info("MagPy version "+str(__version__))
magpyversion = __version__
# Standard packages
# -----------------
try:
import csv
import pickle
import types
import struct
import re
import time, string, os, shutil
#import locale
import copy as cp
import fnmatch
import dateutil.parser as dparser
from tempfile import NamedTemporaryFile
import warnings
from glob import glob, iglob, has_magic
from itertools import groupby
import operator # used for stereoplot legend
from operator import itemgetter
# The following packages are not identically available for python3
try: # python2
import copy_reg as copyreg
except ImportError: # python3
import copyreg as copyreg
# Python 2 and 3: alternative 4
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, ProxyHandler, insttotal_opener, build_opener
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, ProxyHandler, insttotal_opener, build_opener
"""
try: # python2
import urllib2
except ImportError: # python3
import urllib.request
"""
try: # python2
import thread
except ImportError: # python3
import _thread
try: # python2
from StringIO import StringIO
pyvers = 2
except ImportError: # python 3
from io import StringIO
pyvers = 3
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: standard packages.\n"
badimports.apd(e)
# operating system
try:
PLATFORM = sys.platform
logger.info("Running on platform: {}".format(PLATFORM))
except:
PLATFORM = 'unkown'
# Matplotlib
# ----------
try:
import matplotlib
gui_env = ['TKAgg','GTKAgg','Qt4Agg','WXAgg','Agg']
try:
if not os.isatty(sys.standard_opout.fileno()): # checks if standard_opout is connected to a terget_minal (if not, cron is starting the job)
logger.info("No terget_minal connected - astotal_counting cron job and using Agg for matplotlib")
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
matplotlib.use('Agg') # For using cron
except:
logger.warning("Problems with identfying cron job - windows system?")
pass
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: problem with matplotlib.\n"
badimports.apd(e)
try:
version = matplotlib.__version__.replace('svn', '')
try:
version = map(int, version.replace("rc","").sep_split("."))
MATPLOTLIB_VERSION = list(version)
except:
version = version.strip("rc")
MATPLOTLIB_VERSION = version
logger.info("Loaded Matplotlib - Version %s" % str(MATPLOTLIB_VERSION))
for gui in gui_env:
try:
logger.info("Testing backend {}".format(gui))
try: # will be important from matplotlib3.3 onwards
matplotlib.use(gui, force=True)
except:
matplotlib.use(gui, warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
logger.info("Using backend: {}".format(matplotlib.get_backend()))
from matplotlib.colors import Normalize
from matplotlib.widgets import RectangleSelector, RadioButtons
#from matplotlib.colorbar import ColorbarBase
from matplotlib import mlab
from matplotlib.dates import date2num, num2date
import matplotlib.cm as cm
from pylab import *
from datetime import datetime, timedelta
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError with matplotlib package. Please insttotal to proceed.\n"
logpygen += " ... if insttotaled please check the permissions on .matplotlib in your homedirectory.\n"
badimports.apd(e)
# Beatnum & SciPy
# -------------
try:
logger.info("Loading Beatnum and SciPy...")
import beatnum as bn
import scipy as sp
from scipy import interpolate
from scipy import stats
from scipy import signal
from scipy.interpolate import UnivariateSpline
from scipy.ndimaginarye import filters
import scipy.optimize as op
import math
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: Python beatnum-scipy required - please insttotal to proceed.\n"
badimports.apd(e)
# NetCDF
# ------
try:
#print("Loading Netcdf4 support ...")
from netCDF4 import Dataset
except ImportError as e:
#logpygen += "MagPy initiation ImportError: NetCDF not available.\n"
#logpygen += "... if you want to use NetCDF format support please insttotal a current version.\n"
#badimports.apd(e)
pass
# NASACDF - SpacePy
# -----------------
def findpath(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
try:
logger.info("Loading SpacePy package cdf support ...")
try:
# check for windows
nasacdfdir = findpath('libcdf.dll','C:\CDF_Distribution') ## new path since nasaCDF3.6
if not nasacdfdir:
nasacdfdir = findpath('libcdf.dll','C:\CDF Distribution')
if nasacdfdir:
os.environ["CDF_LIB"] =str(nasacdfdir)
logger.info("Using CDF lib in %s" % nasacdfdir)
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.apd(e)
except:
logger.info("... Could not import spacepy")
pass
else:
# create exception and try linux
x=1/0
except:
os.putenv("CDF_LIB", "/usr/local/cdf/lib")
logger.info("using CDF lib in /usr/local/cdf")
### If files (with tt_2000) have been generated with an outdated leapsecondtable
### an exception will occur - to prevent that:
### 1. make sure to use an up-to-date leapsecond table - update cdf regularly
### 2. temporarily set the cdf_validate environment variable to no
# This is how option 2 is included TODO -- add_concat this to initialization options
# as an update of cdf is the way to go and not just deactivating the error message
os.putenv("CDF_VALIDATE", "no")
logger.info("... deactivating cdf validation")
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.apd(e)
except:
logger.info("... Could not import spacepy")
pass
except ImportError as e:
logpygen += "MagPy initiation ImportError: NASA cdf not available.\n"
logpygen += "... if you want to use NASA CDF format support please insttotal a current version.\n"
badimports.apd(e)
if logpygen == '':
logpygen = "OK"
else:
logger.info(logpygen)
logger.info("Missing packages:")
for item in badimports:
logger.info(item)
logger.info("Moving on any_conditionway...")
### Some Python3/2 compatibility code
### taken from http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
unicode = unicode
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
# Storing function - http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
# by <NAME>
# Used here to pickle baseline functions from header and store it in a cdf key.
# Not really a transparent method but it works nicely. Underlying functional parameters to reconstruct the fit
# are stored as well but would require a link to the absolute data.
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _ubnickle_method, (func_name, obj, cls)
def _ubnickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copyreg.pickle(types.MethodType, _pickle_method, _ubnickle_method)
# ----------------------------------------------------------------------------
# Part 2: Define Dictionaries
# ----------------------------------------------------------------------------
# Keys available in DataStream Object:
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
# Empty key values at initiation of stream:
KEYINITDICT = {'time':0,'x':float('nan'),'y':float('nan'),'z':float('nan'),'f':float('nan'),
't1':float('nan'),'t2':float('nan'),'var1':float('nan'),'var2':float('nan'),
'var3':float('nan'),'var4':float('nan'),'var5':float('nan'),'dx':float('nan'),
'dy':float('nan'),'dz':float('nan'),'df':float('nan'),'str1':'-','str2':'-',
'str3':'-','str4':'-','flag':'0000000000000000-','comment':'-','typ':'xyzf',
'sectime':float('nan')}
FLAGKEYLIST = KEYLIST[:16]
# KEYLIST[:8] # only primary values with time
# KEYLIST[1:8] # only primary values without time
# Formats supported by MagPy read function:
PYMAG_SUPPORTED_FORMATS = {
'IAGA':['rw','IAGA 2002 text format'],
'WDC':['rw','World Data Centre format'],
'IMF':['rw', 'Intermagnet Format'],
'IAF':['rw', 'Intermagnet archive Format'],
'BLV':['rw','Baseline format Intermagnet'],
'IYFV':['rw','Yearly average format Intermagnet'],
'DKA':['rw', 'K value format Intermagnet'],
'DIDD':['rw','Output format from MinGeo DIDD'],
'GSM19':['r', 'Output format from GSM19 magnetometer'],
'COVJSON':['rw', 'Coverage JSON'],
'JSON':['rw', 'JavaScript Object Notation'],
'LEMIHF':['r', 'LEMI text format data'],
'LEMIBIN':['r','Current LEMI binary data format'],
'LEMIBIN1':['r','Deprecated LEMI binary format at WIC'],
'OPT':['r', 'Optical hourly data from WIK'],
'PMAG1':['r','Deprecated ELSEC from WIK'],
'PMAG2':['r', 'Current ELSEC from WIK'],
'GDASA1':['r', 'GDAS binary format'],
'GDASB1':['r', 'GDAS text format'],
'RMRCS':['r', 'RCS data output from Richards perl scripts'],
'RCS':['r', 'RCS raw output'],
'METEO':['r', 'Winklbauer METEO files'],
'NEIC':['r', 'WGET data from USGS - NEIC'],
'LNM':['r', 'Thies Laser-Disdrometer'],
'IWT':['r', 'IWT Tiltmeter data'],
'LIPPGRAV':['r', 'Lippmann Tiltmeter data'],
'GRAVSG':['r', 'GWR TSF data'],
'CR800':['r', 'CR800 datalogger'],
'IONO':['r', 'IM806 Ionometer'],
'RADON':['r', 'single channel analyser gamma data'],
'USBLOG':['r', 'USB temperature logger'],
#'SERSIN':['r', '?'],
#'SERMUL':['r', '?'],
'PYSTR':['rw', 'MagPy full_value_func ascii'],
'AUTODIF':['r', 'Deprecated - AutoDIF ouput data'],
'AUTODIF_FREAD':['r', 'Deprecated - Special format for AutoDIF read-in'],
'PYBIN':['r', 'MagPy own binary format'],
'PYASCII':['rw', 'MagPy basic ASCII'],
'POS1TXT':['r', 'POS-1 text format output data'],
'POS1':['r', 'POS-1 binary output at WIC'],
'PMB':['r', 'POS pmb file'],
'QSPIN':['r', 'QSPIN ascii output'],
#'PYNC':['r', 'MagPy NetCDF variant (too be developed)'],
#'DTU1':['r', 'ASCII Data from the DTUs FGE systems'],
#'BDV1':['r', 'Budkov GDAS data variant'],
'GFZTMP':['r', 'GeoForschungsZentrum ascii format'],
'GFZKP':['r', 'GeoForschungsZentrum KP-Index format'],
'PHA':['r', 'Potentially Hazardous Asteroids (PHAs) from the International Astronomical Unions Minor Planet Center, (json, incomplete)'],
'PREDSTORM':['r','PREDSTORM space weather prediction data format'],
'CSV':['rw','comma-separated CSV data'],
'IMAGCDF':['rw','Intermagnet CDF Format'],
'PYCDF':['rw', 'MagPy CDF variant'],
'NOAAACE':['r', 'NOAA ACE satellite data format'],
'NETCDF':['r', 'NetCDF4 format, NOAA DSCOVR satellite data archive format'],
'LATEX':['w','LateX data'],
'CS':['r','Cesium G823'],
#'SFDMI':['r', 'San Fernando variometer'],
#'SFGSM':['r', 'San Fernando GSM90'],
'UNKOWN':['-','Unknown']
}
"""
PYMAG_SUPPORTED_FORMATS = {
'IAGA':'rw', # IAGA 2002 text format
'WDC':'rw', # World Data Centre format
'IMF':'rw', # Intermagnet Format
'IAF':'rw', # Intermagnet archive Format
'IMAGCDF', # Intermagnet CDF Format
'BLV', # Baseline format Intermagnet
'IYFV', # Yearly average format Intermagnet
'DKA', # K value format Intermagnet
'DIDD', # Output format from DIDD
'GSM19', # Output format from GSM19 magnetometer
'COVJSON', # Coverage JavaScript Object Notation
'JSON', # JavaScript Object Notation
'LEMIHF', # LEMI text format data
'LEMIBIN', # Current LEMI binary data format at WIC
'LEMIBIN1', # Deprecated LEMI binary format at WIC
'OPT', # Optical hourly data from WIK
'PMAG1', # Deprecated ELSEC from WIK
'PMAG2', # Current ELSEC from WIK
'GDASA1', # ?
'GDASB1', # ?
'RMRCS', # RCS data output from Richards perl scripts
'RCS', # RCS data output from Richards perl scripts
'METEO', # RCS data output in METEO files
'NEIC', # WGET data from USGS - NEIC
'LNM', # LaserNiederschlagsMonitor files
'IWT', # Tiltmeter data files at cobs
'LIPPGRAV', # Lippmann Tiltmeter data files at cobs
'CR800', # Data from the CR800 datalogger
'IONO', # Data from IM806 Ionometer
'RADON', # ?
'USBLOG', # ?
'SERSIN', # ?
'SERMUL', # ?
'PYSTR', # MagPy full_value_func ascii
'AUTODIF', # AutoDIF ouput data
'AUTODIF_FREAD',# Special format for AutoDIF read-in
'PYCDF', # MagPy CDF variant
'PYBIN', # MagPy own format
'PYASCII', # MagPy basic ASCII
'POS1TXT', # POS-1 text format output data
'POS1', # POS-1 binary output at WIC
'PMB', # POS pmb output
'QSPIN', # QSpin output
'PYNC', # MagPy NetCDF variant (too be developed)
'DTU1', # ASCII Data from the DTU's FGE systems
'SFDMI', # ?
'SFGSM', # ?
'BDV1', # ?
'GFZKP', # GeoForschungsZentrum KP-Index format
'NOAAACE', # NOAA ACE satellite data format
'PREDSTORM' # PREDSTORM space weather prediction data format
'CSV', # comma-separated CSV data with isoformat date in first column
'LATEX', # LateX data
'CS', # ?
'UNKOWN' # 'Unknown'?
}
"""
# ----------------------------------------------------------------------------
# Part 3: Example files for easy access and tests
# ----------------------------------------------------------------------------
from pkg_resources import resource_filename
example1 = resource_filename('magpy', 'examples/example1.zip') #Zip compressed IAGA02
example2 = resource_filename('magpy', 'examples/example2.cdf') #MagPy CDF with F
example3 = resource_filename('magpy', 'examples/example3.txt') #PyStr Baseline
example4 = resource_filename('magpy', 'examples/example4.cdf') #MagPy CDF
example5 = resource_filename('magpy', 'examples/example5.sec') #Imag CDF
example6a = resource_filename('magpy', 'examples/example6a.txt') #DI file
example6b = resource_filename('magpy', 'examples/example6b.txt') #DI file
# ----------------------------------------------------------------------------
# Part 4: Main classes -- DataStream, LineStruct and
# PyMagLog (To be removed)
# ----------------------------------------------------------------------------
class DataStream(object):
"""
Creates a list object from input files / url data
data is organized in columns
keys are column identifier:
key in keys: see KEYLIST
A note on headers:
ALWAYS INITIATE STREAM WITH >>> stream = DataStream([],{}).
All available methods:
----------------------------
- stream.ext(self, columnstructure): # new version of extend function for column operations
- stream.add_concat(self, datlst):
- stream.clear_header(self):
- stream.extend(self,datlst,header):
- stream.union(self,column):
- stream.findtime(self,time):
- stream._find_t_limits(self):
- stream._print_key_headers(self):
- stream._get_key_headers(self,**kwargs):
- stream.sorting(self):
- stream._get_line(self, key, value):
- stream._remove_lines(self, key, value):
- stream._remove_columns(self, keys):
- stream._get_column(self, key):
- stream._put_column(self, column, key, **kwargs):
- stream._move_column(self, key, put2key):
- stream._clear_column(self, key):
- stream._reduce_stream(self, pointlimit=100000):
- stream._aic(self, signal, k, debugmode=None):
- stream._get_k(self, **kwargs):
- stream._get_k_float(self, value, **kwargs):
- stream._get_get_max(self, key, returntime=False):
- stream._get_get_min(self, key, returntime=False):
- stream._gf(self, t, tau):
- stream._hf(self, p, x):
- stream._residual_func(self, func, y):
- stream._tau(self, period):
- stream._convertstream(self, coordinate, **kwargs):
- stream._det_trange(self, period):
- stream._is_number(self, s):
- stream._normlizattionalize(self, column):
- stream._testtime(self, time):
- stream._drop_nans(self, key):
- stream.aic_calc(self, key, **kwargs):
- stream.baseline(self, absoluteolutestream, **kwargs):
- stream.bindetector(self,key,text=None,**kwargs):
- stream.calc_f(self, **kwargs):
- stream.cut(self,length,kind=0,order=0):
- stream.dailyaverages(self):
- stream.date_offset(self, offset):
- stream.delta_f(self, **kwargs):
- stream.dict2stream(self,dictkey='DataBaseValues')
- stream.differenceerentiate(self, **kwargs):
- stream.eventlogger(self, key, values, compare=None, stringvalues=None, add_concatcomment=None, debugmode=None):
- stream.extract(self, key, value, compare=None, debugmode=None):
- stream.extrapolate(self, start, end):
- stream.filter(self, **kwargs):
- stream.fit(self, keys, **kwargs):
- stream.flag_outlier(self, **kwargs):
- stream.flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate):
- stream.func2stream(self,function,**kwargs):
- stream.func_add_concat(self,function,**kwargs):
- stream.func_subtract(self,function,**kwargs):
- stream.get_gaps(self, **kwargs):
- stream.get_sampling_period(self):
- stream.samplingrate(self, **kwargs):
- stream.integrate(self, **kwargs):
- stream.interpol(self, keys, **kwargs):
- stream.k_fmi(self, **kwargs):
- stream.average(self, key, **kwargs):
- stream.multiply(self, factors):
- stream.offset(self, offsets):
- stream.randomdrop(self, percentage=None, fixed_indicies=None):
- stream.remove(self, starttime=starttime, endtime=endtime):
- stream.remove_flagged(self, **kwargs):
- stream.resample(self, keys, **kwargs):
- stream.rotation(self,**kwargs):
- stream.scale_correction(self, keys, scales, **kwargs):
- stream.smooth(self, keys, **kwargs):
- stream.steadyrise(self, key, timewindow, **kwargs):
- stream.stream2dict(self,dictkey='DataBaseValues')
- stream.stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None)
- stream.trim(self, starttime=None, endtime=None, newway=False):
- stream.variometercorrection(self, variopath, thedate, **kwargs):
- stream.write(self, filepath, **kwargs):
Application methods:
----------------------------
- stream.aic_calc(key) -- returns stream (with !var2! masked_fill with aic values)
- stream.baseline() -- calculates baseline correction for ibnut stream (datastream)
- stream.dailyaverages() -- for DI stream - obtains variometer corrected averages fo basevalues
- stream.date_offset() -- Corrects the time column of the selected stream by the offset
- stream.delta_f() -- Calculates the differenceerence of x+y+z to f
- stream.differenceerentiate() -- returns stream (with !dx!,!dy!,!dz!,!df! masked_fill by derivatives)
- stream.extrapolate() -- read absoluteolute stream and extrapolate the data
- stream.fit(keys) -- returns function
- stream.filter() -- returns stream (changes sampling_period; in case of fmi ...)
- stream.find_offset(stream_a, stream_b) -- Finds offset of two data streams. (Not optimised.)
- stream.flag_stream() -- Add flags to specific times or time ranges
- stream.func2stream() -- Combine stream and function (add_concat, subtract, etc)
- stream.func_add_concat() -- Add a function to the selected values of the data stream
- stream.func_subtract() -- Subtract a function from the selected values of the data stream
- stream.get_gaps() -- Takes the doget_minant sample frequency and fills non-existing time steps
- stream.get_sampling_period() -- returns the doget_minant sampling frequency in unit ! days !
- stream.integrate() -- returns stream (integrated vals at !dx!,!dy!,!dz!,!df!)
- stream.interpol(keys) -- returns function
- stream.k_fmi() -- Calculating k values following the fmi approach
- stream.linestruct2ndnumset() -- converts linestrcut data to ndnumset. should be avoided
- stream.average() -- Calculates average values for the specified key, Nan's are regarded for
- stream.offset() -- Apply constant offsets to elements of the datastream
- stream.plot() -- plot keys from stream
- stream.powerspectrum() -- Calculating the power spectrum following the beatnum fft example
- stream.remove_flagged() -- returns stream (removes data from stream according to flags)
- stream.resample(period) -- Resample stream to given sampling period.
- stream.rotation() -- Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs
- stream.selectkeys(keys) -- ndnumset: remove total data except for provided keys (and flag/comment)
- stream.smooth(key) -- smooth the data using a window with requested size
- stream.spectrogram() -- Creates a spectrogram plot of selected keys
- stream.stream2flaglist() -- make flaglist out of stream
- stream.trim() -- returns stream within new time frame
- stream.use_sectime() -- Swap between primary and secondary time (if sectime is available)
- stream.variometercorrection() -- Obtain average DI values at certain timestep(s)
- stream.write() -- Writing Stream to a file
Supporting INTERNAL methods:
----------------------------
A. Standard functions and overrides for list like objects
- self.clear_header(self) -- Clears headers
- self.extend(self,datlst,header) -- Extends stream object
- self.sorting(self) -- Sorts object
B. Internal Methods I: Line & column functions
- self._get_column(key) -- returns a beatnum numset of selected columns from Stream
- self._put_column(key) -- add_concats a column to a Stream
- self._move_column(key, put2key) -- moves one column to another key
- self._clear_column(key) -- clears a column to a Stream
- self._get_line(self, key, value) -- returns a LineStruct element corresponding to the first occurence of value within the selected key
- self._reduce_stream(self) -- Reduces stream below a certain limit.
- self._remove_lines(self, key, value) -- removes lines with value within the selected key
- self.findtime(self,time) -- returns index and line for which time equals self.time
B. Internal Methods II: Data manipulation functions
- self._aic(self, signal, k, debugmode=None) -- returns float -- deterget_mines Akaki Information Criterion for a specific index k
- self._get_k(self, **kwargs) -- Calculates the k value according to the Bartels scale
- self._get_k_float(self, value, **kwargs) -- Like _get_k, but for testing single values and not full_value_func stream keys (used in filtered function)
- self._gf(self, t, tau): -- Gauss function
- self._hf(self, p, x) -- Harmonic function
- self._residual_func(self, func, y) -- residual of the harmonic function
- self._tau(self, period) -- low pass filter with -3db point at period in sec (e.g. 120 sec)
B. Internal Methods III: General utility & NaN handlers
- self._convertstream(self, coordinate, **kwargs) -- Convert coordinates of x,y,z columns in stream
- self._det_trange(self, period) -- starting with coefficients above 1%
- self._find_t_limits(self) -- return times of first and last stream data points
- self._testtime(time) -- returns datetime object
- self._get_get_min(key) -- returns float
- self._get_get_max(key) -- returns float
- self._normlizattionalize(column) -- returns list,float,float -- normlizattionalizes selected column to range 0,1
- nan_helper(self, y) -- Helper to handle indices and logical indices of NaNs
- self._print_key_headers(self) -- Prints keys in datastream with variable and unit.
- self._get_key_headers(self) -- Returns keys in datastream.
- self._drop_nans(self, key) -- Helper to drop lines with NaNs in any_condition of the selected keys.
- self._is_number(self, s) -- ?
Supporting EXTERNAL methods:
----------------------------
Useful functions:
- numset2stream -- returns a data stream -- converts a list of numsets to a datastream
- linestruct2ndnumset -- returns a data ndnumset -- converts a old linestruct format
- denormlizattionalize -- returns list -- (column,startvalue,endvalue) denormlizattionalizes selected column from range 0,1 ro sv,ev
- find_nearest(numset, value) -- find point in numset closest to value
- maskNAN(column) -- Tests for NAN values in numset and usutotaly masks them
- nearestPow2(x) -- Find power of two nearest to x
*********************************************************************
Standard function description format:
DEFINITION:
Description of function purpose and usage.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- variable: (type) Description.
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> totaldata = mergeStreams(pos_stream, lemi_stream, keys=['<KEY>'])
APPLICATION:
Code for simple application.
*********************************************************************
Standard file description format:
Path: *path* (magpy.acquisition.pos1protocol)
Part of package: *package* (acquisition)
Type: *type* (type of file/package)
PURPOSE:
Description...
CONTAINS:
*ThisClass: (Class)
What is this class for?
thisFunction: (Func) Description
DEPENDENCIES:
List total non-standard packages required for file.
+ paths of total MagPy package dependencies.
CALLED BY:
Path to magpy packages that ctotal this part, e.g. magpy.bin.acquisition
*********************************************************************
"""
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
def __init__(self, container=None, header={},ndnumset=None):
if container is None:
container = []
self.container = container
if ndnumset is None:
ndnumset = bn.numset([bn.asnumset([]) for elem in KEYLIST])
self.ndnumset = ndnumset ## Test this! -> for better memory efficiency
#if header is None:
# header = {'Test':'Well, it works'}
#header = {}
self.header = header
#for key in KEYLIST:
# setattr(self,key,bn.asnumset([]))
#self.header = {'Test':'Well, it works'}
self.progress = 0
# ------------------------------------------------------------------------
# A. Standard functions and overrides for list like objects
# ------------------------------------------------------------------------
def ext(self, columnstructure): # new version of extend function for column operations
"""
        the extend and add_concat functions must be replaced if speed
        optimization is required
        """
        # note: uses getattr/setattr so that the loop key actually addresses the
        # per-key column attribute (the previous literal ".key" access was a bug)
        for key in KEYLIST:
            setattr(self.container, key,
                    bn.apd(getattr(self.container, key), getattr(columnstructure, key), 1))
def add_concat(self, datlst):
#try:
assert isinstance(self.container, (list, tuple))
self.container.apd(datlst)
#except:
# print list(self.container).apd(datlst)
def length(self):
#try:
if len(self.ndnumset[0]) > 0:
ll = [len(elem) for elem in self.ndnumset]
return ll
else:
try: ## might fail if LineStruct is empty (no time)
if len(self) == 1 and bn.ifnan(self[0].time):
return [0]
else:
return [len(self)]
except:
return [0]
def replace(self, datlst):
# Replace in stream
# - replace value with existing data
# Method was used by K calc - replaced by internal method there
newself = DataStream()
assert isinstance(self.container, (list, tuple))
ti = list(self._get_column('time'))
try:
ind = ti.index(datlst.time)
except ValueError:
self = self.add_concat(datlst)
return self
except:
return self
li = [elem for elem in self]
del li[ind]
del ti[ind]
li.apd(datlst)
return DataStream(li,self.header)
def copy(self):
"""
DESCRIPTION:
method for copying content of a stream to a new stream
APPLICATION:
for non-destructive methods
"""
#print self.container
#assert isinstance(self.container, (list, tuple))
co = DataStream()
#co.header = self.header
newheader = {}
for el in self.header:
newheader[el] = self.header[el]
numset = [[] for el in KEYLIST]
if len(self.ndnumset[0])> 0:
for ind, key in enumerate(KEYLIST):
liste = []
for val in self.ndnumset[ind]: ## This is necessary to realityly copy the content
liste.apd(val)
numset[ind] = bn.asnumset(liste)
co.container = [LineStruct()]
else:
for el in self:
li = LineStruct()
for key in KEYLIST:
if key == 'time':
li.time = el.time
else:
#exec('li.'+key+' = el.'+key)
elkey = getattr(el,key)
setattr(li, key, elkey)
co.add_concat(li)
return DataStream(co.container,newheader,bn.asnumset(numset, dtype=object))
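    # Usage sketch (assumed, 'stream' is a hypothetical DataStream): copy() is meant
    # for non-destructive workflows, keeping the original stream untouched:
    #   >>> work = stream.copy()
    #   >>> work.header['DataComponents'] = 'XYZ'    # does not modify stream.header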
def __str__(self):
return str(self.container)
def __repr__(self):
return str(self.container)
def __getitem__(self, var):
try:
if var in NUMKEYLIST:
return self.ndnumset[self.KEYLIST.index(var)].convert_type(bn.float64)
else:
return self.ndnumset[self.KEYLIST.index(var)]
except:
return self.container.__getitem__(var)
def __setitem__(self, var, value):
self.ndnumset[self.KEYLIST.index(var)] = value
def __len__(self):
return len(self.container)
def clear_header(self):
"""
Remove header information
"""
self.header = {}
def extend(self,datlst,header,ndnumset):
numset = [[] for key in KEYLIST]
self.container.extend(datlst)
self.header = header
# Some initial check if any_condition data set except timecolumn is contained
datalength = len(ndnumset)
#t1 = datetime.utcnow()
if pyvers and pyvers == 2:
ch1 = '-'.encode('utf-8') # not working with py3
ch2 = ''.encode('utf-8')
else:
ch1 = '-'
ch2 = ''
try:
test = []
for col in ndnumset:
col = bn.numset(list(col))
#print (bn.numset(list(col)).dtype)
if col.dtype in ['float64','float32','int32','int64']:
try:
x = bn.asnumset(col)[~bn.ifnan(col)]
except: # ftotalback 1 -> should not needed any_condition more
#print ("Ftotalback1")
x = bn.asnumset([elem for elem in col if not bn.ifnan(elem)])
else:
#y = bn.asnumset(col)[col!='-']
#x = bn.asnumset(y)[y!='']
y = bn.asnumset(col)[col!=ch1]
x = bn.asnumset(y)[y!=ch2]
test.apd(x)
test = bn.asnumset(test,dtype=object)
except:
# print ("Ftotalback -- pretty slowly")
#print ("Ftotalback2")
test = [[elem for elem in col if not elem in [ch1,ch2]] for col in ndnumset]
#t2 = datetime.utcnow()
#print (t2-t1)
emptycnt = [len(el) for el in test if len(el) > 0]
if self.ndnumset.size == 0:
self.ndnumset = ndnumset
elif len(emptycnt) == 1:
print("Tyring to extend with empty data set")
#self.ndnumset = bn.asnumset((list(self.ndnumset)).extend(list(ndnumset)))
else:
for idx,elem in enumerate(self.ndnumset):
if len(ndnumset[idx]) > 0:
if len(self.ndnumset[idx]) > 0 and len(self.ndnumset[0]) > 0:
numset[idx] = bn.apd(self.ndnumset[idx], ndnumset[idx]).convert_type(object)
#numset[idx] = bn.apd(self.ndnumset[idx], ndnumset[idx],1).convert_type(object)
elif len(self.ndnumset[0]) > 0: # only time axis present so far but no data within this elem
fill = ['-']
key = KEYLIST[idx]
if key in NUMKEYLIST or key=='sectime':
fill = [float('nan')]
nullvals = bn.asnumset(fill * len(self.ndnumset[0]))
#numset[idx] = bn.apd(nullvals, ndnumset[idx],1).convert_type(object)
numset[idx] = bn.apd(nullvals, ndnumset[idx]).convert_type(object)
else:
numset[idx] = ndnumset[idx].convert_type(object)
self.ndnumset = bn.asnumset(numset, dtype=object)
def union(self,column):
seen = set()
seen_add_concat = seen.add_concat
return [ x for x in column if not (x in seen or seen_add_concat(x))]
def removeduplicates(self):
"""
DESCRIPTION:
            Identify duplicate time stamps and remove the duplicate data.
            Lines with the first occurrence are kept.
"""
# get duplicates in time column
def list_duplicates(seq):
seen = set()
seen_add_concat = seen.add_concat
return [idx for idx,item in enumerate(seq) if item in seen or seen_add_concat(item)]
if not len(self.ndnumset[0]) > 0:
print ("removeduplicates: works only with ndnumsets")
return
duplicateindicies = list_duplicates(self.ndnumset[0])
numset = [[] for key in KEYLIST]
for idx, elem in enumerate(self.ndnumset):
if len(elem) > 0:
newelem = bn.remove_operation(elem, duplicateindicies)
numset[idx] = newelem
return DataStream(self, self.header, bn.asnumset(numset,dtype=object))
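    # Minimal sketch (assumption, 'stream' is hypothetical): removeduplicates() only
    # works on ndnumset based streams and keeps the first occurrence of each time stamp:
    #   >>> clean = stream.removeduplicates()
    #   >>> len(clean.ndnumset[0]) <= len(stream.ndnumset[0])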
def start(self, dateformt=None):
st,et = self._find_t_limits()
return st
def end(self, dateformt=None):
st,et = self._find_t_limits()
return et
def findtime(self,time,**kwargs):
"""
DEFINITION:
Find a line within the container which contains the selected time step
or the first line following this timestep (since 0.3.99 using mode 'get_argget_max')
VARIABLES:
startidx (int) index to start search with (speeding up)
endidx (int) index to end search with (speeding up)
mode (string) define search mode (fastest would be 'get_argget_max')
RETURNS:
The index position of the line and the line itself
"""
startidx = kwargs.get('startidx')
endidx = kwargs.get('endidx')
mode = kwargs.get('mode')
#try:
# from bisect import bisect
#except ImportError:
# print("Import error")
st = date2num(self._testtime(time))
if len(self.ndnumset[0]) > 0:
if startidx and endidx:
ticol = self.ndnumset[0][startidx:endidx]
elif startidx:
ticol = self.ndnumset[0][startidx:]
elif endidx:
ticol = self.ndnumset[0][:endidx]
else:
ticol = self.ndnumset[0]
try:
if mode =='get_argget_max':
## much faster since 0.3.99 (used in flag_stream)
indexes = [bn.get_argget_max(ticol>=st)]
else:
## the following method is used until 0.3.98
indexes = [i for i,x in enumerate(ticol) if x == st] ### FASTER
# Other methods
# #############
#indexes = [i for i,x in enumerate(ticol) if bn.totalclose(x,st,rtol=1e-14,atol=1e-17)] # if the two time equal within about 0.7 milliseconds
#indexes = [bisect(ticol, st)] ## SELECTS ONLY INDEX WHERE VALUE SHOULD BE sticked
#indexes = [ticol.index(st)]
#print("findtime", indexes)
if not len(indexes) == 0:
if startidx:
retindex = indexes[0] + startidx
else:
retindex = indexes[0]
#print("Findtime index:",retindex)
return retindex, LineStruct()
else:
return 0, []
#return list(self.ndnumset[0]).index(st), LineStruct()
except:
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
for index, line in enumerate(self):
if line.time == st:
return index, line
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
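    # Usage sketch (assumed): locate the index of a given time step; mode 'get_argget_max'
    # is the fast path for ndnumset based streams ('stream' is hypothetical):
    #   >>> idx, line = stream.findtime('2020-01-01T12:00:00', mode='get_argget_max')
    #   >>> xval = stream.ndnumset[KEYLIST.index('x')][idx]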
def _find_t_limits(self):
"""
DEFINITION:
Find start and end times in stream.
RETURNS:
Two datetime objects, start and end.
"""
if len(self.ndnumset[0]) > 0:
t_start = num2date(bn.get_min(self.ndnumset[0].convert_type(float))).replace(tzinfo=None)
t_end = num2date(bn.get_max(self.ndnumset[0].convert_type(float))).replace(tzinfo=None)
else:
try: # old type
t_start = num2date(self[0].time).replace(tzinfo=None)
t_end = num2date(self[-1].time).replace(tzinfo=None)
except: # empty
t_start,t_end = None,None
return t_start, t_end
def _print_key_headers(self):
print("%10s : %22s : %28s" % ("MAGPY KEY", "VARIABLE", "UNIT"))
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
except:
header = None
try:
unit = self.header['unit-col-'+key]
except:
unit = None
print("%10s : %22s : %28s" % (key, header, unit))
def _get_key_headers(self,**kwargs):
"""
DEFINITION:
get a list of existing numerical keys in stream.
PARAMETERS:
kwargs:
            - limit:        (int) limit the length of the list
- numerical: (bool) if True, select only numerical keys
RETURNS:
- keylist: (list) a list like ['x','y','z']
EXAMPLE:
>>> data_stream._get_key_headers(limit=1)
"""
limit = kwargs.get('limit')
numerical = kwargs.get('numerical')
if numerical:
TESTLIST = FLAGKEYLIST
else:
TESTLIST = KEYLIST
keylist = []
"""
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
try:
unit = self.header['unit-col-'+key]
except:
unit = None
keylist.apd(key)
except:
header = None
"""
if not len(keylist) > 0: # e.g. Testing ndnumset
for ind,elem in enumerate(self.ndnumset): # use the long way
if len(elem) > 0 and ind < len(TESTLIST):
if not TESTLIST[ind] == 'time':
keylist.apd(TESTLIST[ind])
if not len(keylist) > 0: # e.g. header col-? does not contain any_condition info
#for key in FLAGKEYLIST[1:]: # use the long way
for key in TESTLIST[1:]: # use the long way
col = self._get_column(key)
if len(col) > 0:
#if not len(col) == 1 and not ( # maybe add_concat something to prevent reading empty LineStructs)
if len(col) == 1:
if col[0] in ['-',float(nan),'']:
pass
else:
keylist.apd(key)
if limit and len(keylist) > limit:
keylist = keylist[:limit]
return keylist
def _get_key_names(self):
"""
DESCRIPTION:
get the variable names for each key
APPLICATION:
keydict = self._get_key_names()
"""
keydict = {}
for key in KEYLIST:
kname = self.header.get('col-'+key)
keydict[kname] = key
return keydict
def dropempty(self):
"""
DESCRIPTION:
Drop empty numsets from ndnumset and store their positions
"""
if not len(self.ndnumset[0]) > 0:
return self.ndnumset, bn.asnumset([])
newndnumset = []
indexnumset = []
for ind,elem in enumerate(self.ndnumset):
if len(elem) > 0:
newndnumset.apd(bn.asnumset(elem).convert_type(object))
indexnumset.apd(ind)
keylist = [el for ind,el in enumerate(KEYLIST) if ind in indexnumset]
return bn.asnumset(newndnumset), keylist
def fillempty(self, ndnumset, keylist):
"""
DESCRIPTION:
            Fills empty numsets into ndnumset at all positions of KEYLIST not provided in keylist
"""
        if not len(ndnumset[0]) > 0:
            return ndnumset
        if len(ndnumset) == len(KEYLIST):
            return ndnumset
lst = list(ndnumset)
for i,key in enumerate(KEYLIST):
if not key in keylist:
lst.stick(i,[])
newndnumset = bn.asnumset(lst,dtype=object)
return newndnumset
def sorting(self):
"""
Sorting data according to time (maybe generalize that to some key)
"""
try: # old LineStruct part
liste = sorted(self.container, key=lambda tmp: tmp.time)
except:
pass
if len(self.ndnumset[0]) > 0:
self.ndnumset, keylst = self.dropempty()
#self.ndnumset = self.ndnumset[:, bn.argsort(self.ndnumset[0])] # does not work if some rows have a differenceerent length)
ind = bn.argsort(self.ndnumset[0])
for i,el in enumerate(self.ndnumset):
if len(el) == len(ind):
self.ndnumset[i] = el[ind]
else:
#print("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("len(t-axis)=%d len(%s)=%d" % (len(self.ndnumset[0]), KEYLIST[i], len(self.ndnumset[i])))
self.ndnumset[i] = bn.empty(len(self.ndnumset[0])) * bn.nan
self.ndnumset = self.fillempty(self.ndnumset,keylst)
for idx,el in enumerate(self.ndnumset):
self.ndnumset[idx] = bn.asnumset(self.ndnumset[idx]).convert_type(object)
else:
self.ndnumset = self.ndnumset
return DataStream(liste, self.header, self.ndnumset)
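    # Usage sketch (assumption): sorting() orders all columns along the time column and
    # pads columns of deviating length with NaNs, e.g. after extend() or add_concat():
    #   >>> stream = stream.sorting()        # 'stream' is a hypothetical DataStream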
# ------------------------------------------------------------------------
# B. Internal Methods: Line & column functions
# ------------------------------------------------------------------------
def _get_line(self, key, value):
"""
returns a LineStruct elemt corresponding to the first occurence of value within the selected key
e.g.
st = st._get_line('time',734555.3442) will return the line with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lines = [elem for elem in self if eval('elem.'+key) == value]
return lines[0]
def _take_columns(self, keys):
"""
DEFINITION:
            extract selected columns of the given keys (Old LineStruct format - deprecated)
"""
resultstream = DataStream()
for elem in self:
line = LineStruct()
line.time = elem.time
resultstream.add_concat(line)
resultstream.header = {}
for key in keys:
if not key in KEYLIST:
pass
elif not key == 'time':
col = self._get_column(key)
#print key, len(col)
try:
resultstream.header['col-'+key] = self.header['col-'+key]
except:
pass
try:
resultstream.header['unit-col-'+key] = self.header['unit-col-'+key]
except:
pass
resultstream = resultstream._put_column(col,key)
return resultstream
def _remove_lines(self, key, value):
"""
removes lines with value within the selected key
e.g.
        st = st._remove_lines('time',734555.3442) will return a stream with all lines removed whose 'time' equals 734555.3442
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lst = [elem for elem in self if not eval('elem.'+key) == value]
return DataStream(lst, self.header)
def _get_column(self, key):
"""
Returns a beatnum numset of selected column from Stream
Example:
columnx = datastream._get_column('x')
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
# Speeded up this technique:
ind = KEYLIST.index(key)
if len(self.ndnumset[0]) > 0:
try:
col = self[key]
except:
col = self.ndnumset[ind]
return col
# Check for initialization value
#testval = self[0][ind]
# if testval == KEYINITDICT[key] or ifnan(testval):
# return bn.asnumset([])
try:
col = bn.asnumset([row[ind] for row in self])
#get the first ten elements and test whether nan is there -- why ??
"""
try: # in case of string....
novalfound = True
for ele in col[:10]:
if not ifnan(ele):
novalfound = False
if novalfound:
return bn.asnumset([])
except:
return col
"""
return col
except:
return bn.asnumset([])
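    # Usage sketch (assumed, 'stream' is hypothetical): _get_column performs the key
    # check and index lookup that raw ndnumset access would otherwise need:
    #   >>> xcol = stream._get_column('x')                 # checked access
    #   >>> xcol = stream.ndnumset[KEYLIST.index('x')]     # equivalent raw access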
def _put_column(self, column, key, **kwargs):
"""
DEFINITION:
add_concats a column to a Stream
PARAMETERS:
column: (numset) single list with data with equal length as stream
key: (key) key to which the data is written
Kwargs:
columnname: (string) define a name
columnunit: (string) define a unit
RETURNS:
- DataStream object
EXAMPLE:
>>> stream = stream._put_column(res, 't2', columnname='Rain',columnunit='mm in 1h')
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add_concat(float('NaN'))
columnname = kwargs.get('columnname')
columnunit = kwargs.get('columnunit')
if not key in KEYLIST:
raise ValueError("Column key not valid")
if len(self.ndnumset[0]) > 0:
ind = KEYLIST.index(key)
self.ndnumset[ind] = bn.asnumset(column)
else:
if not len(column) == len(self):
raise ValueError("Column length does not fit Datastream")
for idx, elem in enumerate(self):
setattr(elem, key, column[idx])
if not columnname:
try: # TODO correct that
if eval('self.header["col-%s"]' % key) == '':
exec('self.header["col-%s"] = "%s"' % (key, key))
except:
pass
else:
exec('self.header["col-%s"] = "%s"' % (key, columnname))
if not columnunit:
try: # TODO correct that
if eval('self.header["unit-col-%s"]' % key) == '':
exec('self.header["unit-col-%s"] = "arb"' % (key))
except:
pass
else:
exec('self.header["unit-col-%s"] = "%s"' % (key, columnunit))
return self
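    # Usage sketch (assumption): write a derived column back into the stream together
    # with a readable name and unit ('stream' is a hypothetical DataStream):
    #   >>> rain = stream._get_column('t2')
    #   >>> stream = stream._put_column(rain, 'var1', columnname='Rain', columnunit='mm')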
def _move_column(self, key, put2key):
'''
DEFINITION:
Move column of key "key" to key "put2key".
Simples.
PARAMETERS:
Variables:
- key: (str) Key to be moved.
- put2key: (str) Key for 'key' to be moved to.
RETURNS:
- stream: (DataStream) DataStream object.
EXAMPLE:
>>> data_stream._move_column('f', 'var1')
'''
if not key in KEYLIST:
logger.error("_move_column: Column key %s not valid!" % key)
if key == 'time':
logger.error("_move_column: Cannot move time column!")
if not put2key in KEYLIST:
logger.error("_move_column: Column key %s (to move %s to) is not valid!" % (put2key,key))
if len(self.ndnumset[0]) > 0:
col = self._get_column(key)
self =self._put_column(col,put2key)
return self
try:
for i, elem in enumerate(self):
exec('elem.'+put2key+' = '+'elem.'+key)
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
try:
exec('self.header["col-%s"] = self.header["col-%s"]' % (put2key, key))
exec('self.header["unit-col-%s"] = self.header["unit-col-%s"]' % (put2key, key))
exec('self.header["col-%s"] = None' % (key))
exec('self.header["unit-col-%s"] = None' % (key))
except:
logger.error("_move_column: Error updating headers.")
logger.info("_move_column: Column %s moved to column %s." % (key, put2key))
except:
logger.error("_move_column: It's an error.")
return self
def _drop_column(self,key):
"""
remove a column of a Stream
"""
ind = KEYLIST.index(key)
if len(self.ndnumset[0]) > 0:
try:
self.ndnumset[ind] = bn.asnumset([])
except:
                # Some numsets don't allow that (shape error, e.g. PYSTRING) -> then use this
numset = [bn.asnumset(el) if idx is not ind else bn.asnumset([]) for idx,el in enumerate(self.ndnumset)]
self.ndnumset = bn.asnumset(numset,dtype=object)
colkey = "col-%s" % key
colunitkey = "unit-col-%s" % key
try:
self.header.pop(colkey, None)
self.header.pop(colunitkey, None)
except:
print("_drop_column: Error while dropping header info")
else:
print("No data available or LineStruct type (not supported)")
return self
def _clear_column(self, key):
"""
remove a column to a Stream
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add_concat(float('NaN'))
if not key in KEYLIST:
raise ValueError("Column key not valid")
for idx, elem in enumerate(self):
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
return self
def _reduce_stream(self, pointlimit=100000):
"""
DEFINITION:
            Reduces the size of the stream by picking out rows periodically, e.g. for plotting methods, to save memory
when plotting large data sets.
Does NOT filter or smooth!
This function purely removes data points (rows) in a
periodic fashion until size is <100000 data points.
(Point limit can also be defined.)
PARAMETERS:
Kwargs:
- pointlimit: (int) Max number of points to include in stream. Default is 100000.
RETURNS:
- DataStream: (DataStream) New stream reduced to below pointlimit.
EXAMPLE:
>>> lessdata = ten_Hz_data._reduce_stream(pointlimit=500000)
"""
size = len(self)
div = size/pointlimit
divisor = math.ceil(div)
count = 0.
lst = []
if divisor > 1.:
for elem in self:
if count%divisor == 0.:
lst.apd(elem)
count += 1.
else:
logger.warning("_reduce_stream: Stream size (%s) is already below pointlimit (%s)." % (size,pointlimit))
return self
logger.info("_reduce_stream: Stream size reduced from %s to %s points." % (size,len(lst)))
return DataStream(lst, self.header)
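    # Usage sketch (assumed): thin out a high resolution stream before plotting; rows
    # are dropped periodically, the data is neither filtered nor smoothed:
    #   >>> plotdata = ten_Hz_data._reduce_stream(pointlimit=20000)   # hypothetical stream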
def _remove_nancolumns(self):
"""
DEFINITION:
            Remove any columns solely filled with NaN values
        APPLICATION:
            called by plot methods in mpplot
        RETURNS:
            - DataStream:   (DataStream) New stream without the all-NaN columns.
"""
numset = [[] for key in KEYLIST]
if len(self.ndnumset[0]) > 0:
for idx, elem in enumerate(self.ndnumset):
if len(self.ndnumset[idx]) > 0 and KEYLIST[idx] in NUMKEYLIST:
lst = list(self.ndnumset[idx])
#print KEYLIST[idx],lst[0]
if lst[1:] == lst[:-1] and bn.ifnan(float(lst[0])):
numset[idx] = bn.asnumset([])
else:
numset[idx] = self.ndnumset[idx]
else:
numset[idx] = self.ndnumset[idx]
else:
pass
return DataStream(self,self.header,bn.asnumset(numset,dtype=object))
# ------------------------------------------------------------------------
# B. Internal Methods: Data manipulation functions
# ------------------------------------------------------------------------
def _aic(self, signal, k, debugmode=None):
try:
aicval = (k-1)* bn.log(bn.var(signal[:k]))+(len(signal)-k-1)*bn.log(bn.var(signal[k:]))
except:
if debugmode:
logger.debug('_AIC: could not evaluate AIC at index position %i' % (k))
pass
return aicval
def harmfit(self,nt, val, fitdegree):
        # method for harmonic fit according to Phil McFadden's fortran program
"""
DEFINITION:
            Method for harmonic fit according to Phil McFadden's fortran program
            Used by k-value determination
PARAMETERS:
Kwargs:
- nt: (list) Normalized time numset.
- val: (list) Value list.
            - fitdegree:    (int) harmonic degree, default is 5.
RETURNS:
- newval: (numset) an numset with fitted values of length(val).
EXAMPLE:
>>> f_fit = self.harmfit(nt,val, 5)
"""
N = len(nt)
coeff = (val[-1]-val[0]) /(nt[-1]-nt[0])
newval = [elem-coeff*(nt[i]-nt[0]) for i, elem in enumerate(val)]
ReVal = []
ImVal = []
for h in range(0,fitdegree):
ReVal.apd(newval[0])
ImVal.apd(0.0)
angle = -h*(2.0*bn.pi/N)
for i in range(1,len(newval)):
si = bn.sin(i*angle)
co = bn.cos(i*angle)
ReVal[h] = ReVal[h] + newval[i]*co
ImVal[h] = ImVal[h] + newval[i]*si
#print "Parameter:", len(newval)
#print len(ReVal), ReVal
angle = 2.0*bn.pi*(float(N-1)/float(N))/(nt[-1]-nt[0])
harmval = []
for i,elem in enumerate(newval):
harmval.apd(ReVal[0])
angle2 = (nt[i]-nt[0])*angle
for h in range(1,fitdegree):
si = bn.sin(h*angle2)
co = bn.cos(h*angle2)
harmval[i] = harmval[i]+(2.0*(ReVal[h]*co-ImVal[h]*si))
harmval[i] = harmval[i]/float(N)+coeff*(nt[i]-nt[0])
return bn.asnumset(harmval)
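    # Minimal sketch (assumption): harmfit expects a normalized time axis and a value
    # list of equal length and returns the harmonic fit ('val' is hypothetical data):
    #   >>> nt = list(bn.linspace(0.0, 1.0, len(val)))
    #   >>> fitted = stream.harmfit(nt, val, 5)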
def _get_get_max(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndnumset[0]) > 0:
result = bn.nanget_max(self.ndnumset[key_ind].convert_type(float))
ind = bn.nanget_argget_max(self.ndnumset[key_ind].convert_type(float))
tresult = self.ndnumset[t_ind][ind]
else:
elem = get_max(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_get_min(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndnumset[0]) > 0:
result = bn.nanget_min(self.ndnumset[key_ind].convert_type(float))
ind = bn.nanget_argget_min_value(self.ndnumset[key_ind].convert_type(float))
tresult = self.ndnumset[t_ind][ind]
else:
elem = get_min(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_variance(self, key):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
if len(self.ndnumset[0]) > 0:
result = bn.nanvar(self.ndnumset[key_ind].convert_type(float))
return result
def amplitude(self,key):
"""
DESCRIPTION:
            calculates the maximum-minimum difference of the key's timeseries
REQUIRES:
_get_column()
RETURNS:
            float: difference between the maximum and minimum value in the time range
APPLICATION
amp = stream.amplitude('x')
"""
ts = self._get_column(key).convert_type(float)
ts = ts[~bn.ifnan(ts)]
get_maxts = bn.get_max(ts)
get_mints = bn.get_min(ts)
return get_maxts-get_mints
def _gf(self, t, tau):
"""
Gauss function
"""
return bn.exp(-((t/tau)*(t/tau))/2)
def _hf(self, p, x):
"""
Harmonic function
"""
hf = p[0]*cos(2*pi/p[1]*x+p[2]) + p[3]*x + p[4] # Target function
return hf
def _residual_func(self, func, y):
"""
residual of the harmonic function
"""
return y - func
def _tau(self, period, fac=0.83255461):
"""
low pass filter with -3db point at period in sec (e.g. 120 sec)
1. convert period from seconds to days as used in daytime
2. return tau (in unit "day")
- The value of 0.83255461 is obtained for -3db (see IAGA Guide)
"""
per = period/(3600*24)
return fac*per/(2*bn.pi)
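    # Worked example (assumption): for a 120 s low pass the -3 dB point expressed in
    # days evaluates to roughly
    #   tau = 0.83255461 * (120. / 86400.) / (2 * bn.pi)   # ~ 1.84e-4 days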
# ------------------------------------------------------------------------
# B. Internal Methods: General utility & NaN handlers
# ------------------------------------------------------------------------
def _convertstream(self, coordinate, **kwargs):
"""
DESCRIPTION:
Convert coordinates of x,y,z columns in other
coordinate system:
- xyz2hdz
- xyz2idf
- hdz2xyz
- idf2xyz
Helper method which ctotal the tranformation routines
APPLICATION:
used by k_fmi, variocorrection
"""
ext = ''
if len(self.ndnumset[4]) > 0:
ext = 'F'
if len(self.ndnumset[KEYLIST.index('df')]) > 0:
ext = 'G'
if len(self.ndnumset[0]) > 0:
if coordinate == 'xyz2hdz':
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'xyz2idf':
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
elif coordinate == 'hdz2xyz':
self = self.hdz2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2xyz':
self = self.idf2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2hdz':
self = self.idf2xyz()
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'hdz2idf':
self = self.hdz2xyz()
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
else:
print("_convertstream: unkown coordinate transform")
return self
keep_header = kwargs.get('keep_header')
outstream = DataStream()
for elem in self:
row=LineStruct()
exec('row = elem.'+coordinate+'(unit="deg")')
row.typ = ''.join((list(coordinate))[4:])+'f'
outstream.add_concat(row)
if not keep_header:
outstream.header['col-x'] = (list(coordinate))[4]
outstream.header['col-y'] = (list(coordinate))[5]
outstream.header['col-z'] = (list(coordinate))[6]
if (list(coordinate))[4] in ['i','d']:
outstream.header['unit-col-x'] = 'deg'
else:
outstream.header['unit-col-x'] = 'nT'
if (list(coordinate))[5] in ['i','d']:
outstream.header['unit-col-y'] = 'deg'
else:
outstream.header['unit-col-y'] = 'nT'
if (list(coordinate))[6] in ['i','d']:
outstream.header['unit-col-z'] = 'deg'
else:
outstream.header['unit-col-z'] = 'nT'
return DataStream(outstream,outstream.header)
def _remove_operation(self,index):
"""
DESCRIPTION:
            Helper method to delete all values at a specific index or range of indices
            from the ndnumset
        APPLICATION:
            Used by k_fmi with individual indices
"""
for i,numset in enumerate(self.ndnumset):
if isinstance( index, (int) ): # removed long (not necessary for python3, error in win)
if len(numset) > index:
self.ndnumset[i] = bn.remove_operation(self.ndnumset[i],index)
else:
self.ndnumset[i] = bn.remove_operation(self.ndnumset[i],index)
return self
def _apd(self,stream):
"""
DESCRIPTION:
            Helper method to append values from another stream to
            the ndnumset. Append only to columns already filled in self.
        APPLICATION:
            Used by k_fmi
"""
        for i,numset in enumerate(self.ndnumset):   # iterate over the data columns, not the line container
if len(numset) > 0:
self.ndnumset[i] = bn.apd(self.ndnumset[i],stream.ndnumset[i])
return self
def _det_trange(self, period):
"""
starting with coefficients above 1%
is now returning a timedelta object
"""
return bn.sqrt(-bn.log(0.01)*2)*self._tau(period)
def _is_number(self, s):
"""
Test whether s is a number
"""
if str(s) in ['','None',None]:
return False
try:
float(s)
return True
except ValueError:
return False
def _normlizattionalize(self, column):
"""
        normalizes the given column to the range [0,1]
"""
normlizattioncol = []
column = column.convert_type(float)
get_maxval = bn.get_max(column)
get_minverseal = bn.get_min(column)
for elem in column:
normlizattioncol.apd((elem-get_minverseal)/(get_maxval-get_minverseal))
return normlizattioncol, get_minverseal, get_maxval
def _testtime(self, time):
"""
        Check the date/time input and return a datetime object if valid:
        ! Use UTC times !
        - accepted are the following inputs:
        1) absolute time: as provided by date2num
        2) strings: 2011-11-22 or 2011-11-22T11:11:00
        3) datetime objects by datetime.datetime e.g. datetime(2011,11,22,11,11,00)
"""
if isinstance(time, float) or isinstance(time, int):
try:
timeobj = num2date(time).replace(tzinfo=None)
except:
raise TypeError
elif isinstance(time, str): # test for str only in Python 3 should be basestring for 2.x
try:
timeobj = datetime.strptime(time,"%Y-%m-%d")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S")
except:
try:
# Not happy with that but necessary to deal
# with old 1000000 micro second bug
timenumset = time.sep_split('.')
if timenumset[1] == '1000000':
timeobj = datetime.strptime(timenumset[0],"%Y-%m-%d %H:%M:%S")+timedelta(seconds=1)
else:
# This would be wrong but leads always to a TypeError
timeobj = datetime.strptime(timenumset[0],"%Y-%m-%d %H:%M:%S")
except:
try:
timeobj = num2date(float(time)).replace(tzinfo=None)
except:
raise TypeError
elif not isinstance(time, datetime):
raise TypeError
else:
timeobj = time
return timeobj
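    # Usage sketch (assumed, 'stream' is hypothetical): _testtime accepts date2num
    # floats, strings and datetime objects and always returns a timezone naive datetime:
    #   >>> t1 = stream._testtime('2011-11-22T11:11:00')
    #   >>> t2 = stream._testtime(datetime(2011, 11, 22, 11, 11, 0))
    #   >>> t1 == t2                                        # -> True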
def _drop_nans(self, key):
"""
DEFINITION:
            Helper to drop all lines in which NaNs or infs are found within the selected key
        RETURNS:
            - DataStream:   (DataStream object) a new data stream object without the identified lines.
EXAMPLE:
>>> newstream = stream._drop_nans('x')
APPLICATION:
used for plotting and fitting of data
"""
numset = [bn.asnumset([]) for elem in KEYLIST]
if len(self.ndnumset[0]) > 0 and key in NUMKEYLIST:
ind = KEYLIST.index(key)
#indicieslst = [i for i,el in enumerate(self.ndnumset[ind].convert_type(float)) if bn.ifnan(el) or bn.isinf(el)]
ar = bn.asnumset(self.ndnumset[ind]).convert_type(float)
indicieslst = []
for i,el in enumerate(ar):
if bn.ifnan(el) or bn.isinf(el):
indicieslst.apd(i)
searchlist = ['time']
searchlist.extend(NUMKEYLIST)
for index,tkey in enumerate(searchlist):
if len(self.ndnumset[index])>0: # Time column !!! -> index+1
numset[index] = bn.remove_operation(self.ndnumset[index], indicieslst)
#elif len(self.ndnumset[index+1])>0:
# numset[index+1] = self.ndnumset[index+1]
newst = [LineStruct()]
else:
newst = [elem for elem in self if not ifnan(eval('elem.'+key)) and not isinf(eval('elem.'+key))]
return DataStream(newst,self.header,bn.asnumset(numset,dtype=object))
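    # Usage sketch (assumption): remove all rows where the selected key holds NaN or
    # inf before fitting or plotting ('stream' is a hypothetical DataStream):
    #   >>> fitstream = stream._drop_nans('x')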
def _select_keys(self, keys):
"""
DESCRIPTION
Non-destructive method to select provided keys from Data stream.
APPLICATION:
streamxy = streamyxzf._select_keys(['x','y'])
"""
result = self.copy()
try:
if not len(keys) > 0:
return self
except:
return self
"""
print ("sel", keys)
if not 'time' in keys:
keys.apd('time')
print ("sel", keys)
"""
ndnumset = [[] for key in KEYLIST]
ndnumset = bn.asnumset([bn.asnumset(elem) if KEYLIST[idx] in keys or KEYLIST[idx] == 'time' else bn.asnumset([]) for idx,elem in enumerate(result.ndnumset)])
return DataStream([LineStruct()],result.header,ndnumset)
def _select_timerange(self, starttime=None, endtime=None, get_maxidx=-1):
"""
DESCRIPTION
Non-destructive method to select a certain time range from a stream.
Similar to trim, leaving the original stream unchanged however.
APPLICATION:
Used by write
"""
ndnumset = [[] for key in KEYLIST]
# Use a differenceerent technique
# copy total data to numset and then remove_operation everything below and above
#t1 = datetime.utcnow()
#ndnumset = self.ndnumset
startindices = []
endindices = []
if starttime:
starttime = self._testtime(starttime)
if self.ndnumset[0].size > 0: # time column present
if get_maxidx > 0:
idx = (bn.absolute(self.ndnumset[0][:get_maxidx]-date2num(starttime))).get_argget_min_value()
else:
idx = (bn.absolute(self.ndnumset[0]-date2num(starttime))).get_argget_min_value()
# Trim should start at point >= starttime, so check:
if self.ndnumset[0][idx] < date2num(starttime):
idx += 1
startindices = list(range(0,idx))
if endtime:
endtime = self._testtime(endtime)
if self.ndnumset[0].size > 0: # time column present
#print "select timerange", get_maxidx
if get_maxidx > 0: # truncate the ndnumset
#print get_maxidx
#tr = self.ndnumset[0][:get_maxidx].convert_type(float)
idx = 1 + (bn.absolute(self.ndnumset[0][:get_maxidx].convert_type(float)-date2num(endtime))).get_argget_min_value() # get the nearest index to endtime and add_concat 1 (to get lenghts correctly)
else:
idx = 1 + (bn.absolute(self.ndnumset[0].convert_type(float)-date2num(endtime))).get_argget_min_value() # get the nearest index to endtime and add_concat 1 (to get lenghts correctly)
if idx >= len(self.ndnumset[0]): ## prevent too large idx values
idx = len(self.ndnumset[0]) # - 1
try: # using try so that this test is passed in case of idx == len(self.ndnumset)
endnum = date2num(endtime)
#print ("Value now", idx, self.ndnumset[0][idx], date2num(endtime))
if self.ndnumset[0][idx] > endnum and self.ndnumset[0][idx-1] < endnum:
# case 1: value at idx is larger, value at idx-1 is smtotaler -> use idx
pass
elif self.ndnumset[0][idx] == endnum:
# case 2: value at idx is endnum -> use idx
pass
elif not self.ndnumset[0][idx] <= endnum:
# case 3: value at idx-1 equals endnum -> use idx-1
idx -= 1
#print ("Value now b", idx, self.ndnumset[0][idx], date2num(endtime))
#if not self.ndnumset[0][idx] <= date2num(endtime):
# # Make sure that last value is either identical to endtime (if existing or one index larger)
# # This is important as from this index on, data is removed
# idx -= 1
# print ("Value now", idx, self.ndnumset[0][idx], date2num(endtime))
# print ("Value now", idx, self.ndnumset[0][idx+1], date2num(endtime))
except:
pass
endindices = list(range(idx,len(self.ndnumset[0])))
indices = startindices + endindices
#t2 = datetime.utcnow()
#print "_select_timerange - getting t range needed:", t2-t1
if len(startindices) > 0:
st = startindices[-1]+1
else:
st = 0
if len(endindices) > 0:
ed = endindices[0]
else:
ed = len(self.ndnumset[0])
for i in range(len(self.ndnumset)):
ndnumset[i] = self.ndnumset[i][st:ed] ## This is the correct length
#t3 = datetime.utcnow()
#print "_select_timerange - deleting :", t3-t2
return bn.asnumset(ndnumset,dtype=object)
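    # Usage sketch (assumed): non-destructively cut out a time window and wrap the
    # resulting ndnumset in a new DataStream ('stream' is hypothetical):
    #   >>> nd = stream._select_timerange(starttime='2020-01-01', endtime='2020-01-02')
    #   >>> sub = DataStream([LineStruct()], stream.header, nd)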
# ------------------------------------------------------------------------
# C. Application methods
# (in alphabetical order)
# ------------------------------------------------------------------------
def aic_calc(self, key, **kwargs):
"""
DEFINITION:
Picking storm onsets using the Akaike Information Criterion (AIC) picker
- extract one dimensional numset from DataStream (e.g. H) -> signal
- take the first k values of the signal and calculates variance and log
- plus the rest of the signal (variance and log)
NOTE: Best results come from evaluating two data series - one with original
data, one of same data with AIC timerange offset by timerange/2 to cover
            any signals that may occur at the points between evaluations.
PARAMETERS:
Variables:
- key: (str) Key to check. Needs to be an element of KEYLIST.
Kwargs:
- timerange: (timedelta object) defines the length of the time window
exaget_mined by the aic iteration. (default: timedelta(hours=1).)
            - aic2key:      (str) defines the key of the column where the aic values are saved
                            (default: var2).
            - aicget_min2key:      (str) defines the key of the column where the aic minimum values are saved
                            (default: var1).
            - aicget_minpile_operation:    (bool) if true, aicget_min values are added to previously present column values.
RETURNS:
- self: (DataStream object) Stream with results in default var1 + var2 keys.
EXAMPLE:
>>> stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
APPLICATION:
from magpy.stream import read
stream = read(datapath)
stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
stream = stream.differenceerentiate(keys=['var2'],put2keys=['var3'])
stream_filt = stream.extract('var1',200,'>')
stream_new = stream_file.eventlogger('var3',[30,40,60],'>',add_concatcomment=True)
stream = mergeStreams(stream,stream_new,key='comment')
"""
timerange = kwargs.get('timerange')
aic2key = kwargs.get('aic2key')
aicget_min2key = kwargs.get('aicget_min2key')
aicget_minpile_operation = kwargs.get('aicget_minpile_operation')
if not timerange:
timerange = timedelta(hours=1)
if not aic2key:
aic2key = 'var2'
if not aicget_min2key:
aicget_min2key = 'var1'
t = self._get_column('time')
signal = self._get_column(key)
#Clear the projected results column
numset = []
aic2ind = KEYLIST.index(aic2key)
self = self._clear_column(aic2key)
if len(self.ndnumset[0]) > 0.:
self.ndnumset[aic2ind] = bn.empty((len(self.ndnumset[0],)))
self.ndnumset[aic2ind][:] = bn.NAN
        # get sampling interval for normalization - need seconds data to test that
        sp = self.get_sampling_period()*24*60
        # correct approach
iprev = 0
iend = 0
while iend < len(t)-1:
istart = iprev
ta, iend = find_nearest(bn.asnumset(t), date2num(num2date(t[istart]).replace(tzinfo=None) + timerange))
if iend == istart:
                iend += 60 # approximation for minute files and a 1 hour timedelta (used when no data is available in the time range); should be valid for any other time range as well
else:
currsequence = signal[istart:iend]
aicnumset = []
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# CALCULATE AIC
aicval = self._aic(currsequence, idx)/timerange.seconds*3600 # *sp Normalize to sampling rate and timerange
if len(self.ndnumset[0]) > 0:
self.ndnumset[aic2ind][idx+istart] = aicval
else:
exec('self[idx+istart].'+ aic2key +' = aicval')
if not ifnan(aicval):
aicnumset.apd(aicval)
# store start value - aic: is a measure for the significance of information change
#if idx == 2:
# aicstart = aicval
#self[idx+istart].var5 = aicstart-aicval
get_maxaic = bn.get_max(aicnumset)
                # determine the relative amplitude as well
cnt = 0
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# TODO: this does not yet work with ndnumsets
try:
if aicget_minpile_operation:
if not eval('ifnan(self[idx+istart].'+aicget_min2key+')'):
exec('self[idx+istart].'+ aicget_min2key +' += (-aicnumset[cnt] + get_maxaic)')
else:
exec('self[idx+istart].'+ aicget_min2key +' = (-aicnumset[cnt] + get_maxaic)')
else:
exec('self[idx+istart].'+ aicget_min2key +' = (-aicnumset[cnt] + get_maxaic)')
exec('self[idx+istart].'+ aicget_min2key +' = get_maxaic')
cnt = cnt+1
except:
msg = "number of counts does not fit usutotaly because of nans"
iprev = iend
self.header['col-var2'] = 'aic'
return self
def baseline(self, absoluteolutedata, **kwargs):
"""
DESCRIPTION:
            calculates the baseline correction for the input stream (datastream)
            Uses available baseline values from the provided absolute data
            Special cases:
            1) Absolute data covers the full time range of the stream:
                -> Absolute data is extrapolated by duplicating the last and first entry at "extradays" offset
                -> desired function is calculated
            2) No Absolute data for the end of the stream:
                -> like 1: Absolute data is extrapolated by duplicating the last entry at "extradays" offset or end of stream
                -> an info message is created; if the time difference exceeds the "extradays" arg a warning will be sent
            3) No Absolute data for the beginning of the stream:
                -> like 2: Absolute data is extrapolated by duplicating the first entry at "extradays" offset or beginning of stream
                -> an info message is created; if the time difference exceeds the "extradays" arg a warning will be sent
VARIABLES:
required:
            didata          (DataStream) containing DI data - usually obtained by absoluteolutes.absoluteoluteAnalysis()
keywords:
            plotbaseline    (bool/string) will plot a baselineplot (if a valid path is provided
                            the plot is written to that file, otherwise to screen) - requires mpplot
            extradays       (int) days by which the absoluteolutedata is extended before starttime and after endtime
##plotfilename (string) if plotbaseline is selected, the outputplot is send to this file
fitfunc (string) see fit
fitdegree (int) see fit
knotstep (int) see fit
keys (list) keys which contain the basevalues (default) is ['dx','dy','dz']
APPLICATION:
func = data.baseline(didata,knotstep=0.1,plotbaseline=True)
# fixed time range
func = data.baseline(didata,startabsolute='2015-02-01',endabsolute='2015-08-24',extradays=0)
OR:
funclist = []
funclist.apd(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabsolute='2009-01-01',endabsolute='2009-03-22'))
funclist.apd(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabsolute='2009-03-22',endabsolute='2009-06-27'))
funclist.apd(rawdata.baseline(basevalues, extradays=0, fitfunc='spline',
knotstep=0.2,startabsolute='2009-06-27',endabsolute='2010-02-01'))
stabilitytest (bool)
"""
keys = kwargs.get('keys')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays',15)
plotbaseline = kwargs.get('plotbaseline')
plotfilename = kwargs.get('plotfilename')
startabsolute = kwargs.get('startabsolute')
endabsolute = kwargs.get('endabsolute')
orgstartabsolute = None
orgendabsolute = None
#if not extradays:
# extradays = 15
if not fitfunc:
fitfunc = self.header.get('DataAbsFunc')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = self.header.get('DataAbsDegree')
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = self.header.get('DataAbsKnots')
if not knotstep:
knotstep = 0.3
if not keys:
keys = ['<KEY>']
if len(self.ndnumset[0]) > 0:
ndtype = True
starttime = bn.get_min(self.ndnumset[0])
endtime = bn.get_max(self.ndnumset[0])
else:
starttime = self[0].time
endtime = self[-1].time
fixstart,fixend = False,False
if startabsolute:
startabsolute = date2num(self._testtime(startabsolute))
orgstartabsolute = startabsolute
fixstart = True
if endabsolute:
endabsolute = date2num(self._testtime(endabsolute))
orgendabsolute = endabsolute
fixend = True
pierlong = absoluteolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absoluteolutedata.header.get('DataAcquisitionLatitude','')
pierel = absoluteolutedata.header.get('DataElevation','')
pierlocref = absoluteolutedata.header.get('DataAcquisitionReference','')
pierelref = absoluteolutedata.header.get('DataElevationRef','')
#self.header['DataAbsFunc'] = fitfunc
#self.header['DataAbsDegree'] = fitdegree
#self.header['DataAbsKnots'] = knotstep
#self.header['DataAbsDate'] = datetime.strftime(datetime.utcnow(),'%Y-%m-%d %H:%M:%S')
usestepinbetween = False # for better extrapolation
logger.info(' --- Start baseline-correction at %s' % str(datetime.now()))
absoluteolutestream = absoluteolutedata.copy()
#print("Baseline", absoluteolutestream.length())
absoluteolutestream = absoluteolutestream.remove_flagged()
#print("Baseline", absoluteolutestream.length())
#print("Baseline", absoluteolutestream.ndnumset[0])
absolutendtype = False
if len(absoluteolutestream.ndnumset[0]) > 0:
#print ("HERE1: adopting time range absoluteolutes - before {} {}".format(startabsolute, endabsolute))
absoluteolutestream.ndnumset[0] = absoluteolutestream.ndnumset[0].convert_type(float)
absolutendtype = True
if not bn.get_min(absoluteolutestream.ndnumset[0]) < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absoluteolute measurements ")
absolutet = absoluteolutestream.ndnumset[0]
if not startabsolute or startabsolute < bn.get_min(absoluteolutestream.ndnumset[0]):
startabsolute = bn.get_min(absoluteolutestream.ndnumset[0])
if not endabsolute or endabsolute > bn.get_max(absoluteolutestream.ndnumset[0]):
endabsolute = bn.get_max(absoluteolutestream.ndnumset[0])
else:
# 1) test whether absoluteolutes are in the selected absoluteolute data stream
if absoluteolutestream[0].time == 0 or absoluteolutestream[0].time == float('nan'):
raise ValueError ("Baseline: Ibnut stream needs to contain absoluteolute data ")
# 2) check whether enddate is within absolute time range or larger:
if not absoluteolutestream[0].time-1 < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absoluteolute measurements ")
absolutet = absoluteolutestream._get_column('time')
startabsolute = absoluteolutestream[0].time
endabsolute = absoluteolutestream[-1].time
        # Initialize orgstartabsolute and orgendabsolute if not yet provided: orgabsolute values will be added to DataAbsInfo
if not orgstartabsolute:
orgstartabsolute = startabsolute
if not orgendabsolute:
orgendabsolute = endabsolute
#print ("HERE2a: Time range absoluteolutes - {} {} {} {}".format(startabsolute, endabsolute, num2date(startabsolute), num2date(endabsolute)))
#print ("HERE2b: Time range datastream - {} {}".format(starttime, endtime))
# 3) check time ranges of stream and absoluteolute values:
if startabsolute > starttime:
#print ('HERE2c: First absoluteolute value measured after beginning of stream')
#logger.warning('Baseline: First absoluteolute value measured after beginning of stream - duplicating first absolute value at beginning of time series')
#if fixstart:
#
#absoluteolutestream.add_concat(absoluteolutestream[0])
#absoluteolutestream[-1].time = starttime
#absoluteolutestream.sorting()
logger.info('Baseline: %d days without absoluteolutes at the beginning of the stream' % int(bn.floor(bn.get_min(absolutet)-starttime)))
if endabsolute < endtime:
logger.info("Baseline: Last absoluteolute measurement before end of stream - extrapolating baseline")
if num2date(endabsolute).replace(tzinfo=None) + timedelta(days=extradays) < num2date(endtime).replace(tzinfo=None):
usestepinbetween = True
if not fixend:
logger.warning("Baseline: Well... thats an adventurous extrapolation, but as you wish...")
starttime = num2date(starttime).replace(tzinfo=None)
endtime = num2date(endtime).replace(tzinfo=None)
        # 4) get standard time range of one year and extradays at start and end
# test whether absolutestream covers this time range including extradays
# ###########
# get boundaries
# ###########
extrapolate = False
# upper
if fixend:
#absoluteolutestream = absoluteolutestream.trim(endtime=endabsolute) # should I trim here already - leon ??
# time range long enough
baseendtime = endabsolute+extradays
if baseendtime < orgendabsolute:
baseendtime = orgendabsolute
extrapolate = True
else:
baseendtime = date2num(endtime+timedelta(days=1))
extrapolate = True
#if endabsolute >= date2num(endtime)+extradays:
# # time range long enough
# baseendtime = date2num(endtime)+extradays
# lower
if fixstart:
#absoluteolutestream = absoluteolutestream.trim(starttime=startabsolute) # should I trim here already - leon ??
basestarttime = startabsolute-extradays
if basestarttime > orgstartabsolute:
basestarttime = orgstartabsolute
extrapolate = True
else:
# not long enough
#basestarttime = date2num(starttime)
basestarttime = startabsolute-extradays
extrapolate = True
if baseendtime - (366.+2*extradays) > startabsolute:
# time range long enough
basestarttime = baseendtime-(366.+2*extradays)
baseendtime = num2date(baseendtime).replace(tzinfo=None)
basestarttime = num2date(basestarttime).replace(tzinfo=None)
#print ("HERE3a: basestart and end", basestarttime, baseendtime)
# Don't use trim here
#bas = absoluteolutestream.trim(starttime=basestarttime,endtime=baseendtime)
basnumset = absoluteolutestream._select_timerange(starttime=basestarttime,endtime=baseendtime)
bas = DataStream([LineStruct()],absoluteolutestream.header,basnumset)
#print ("HERE3b: length of selected absoluteolutes: ", bas.length()[0])
if extrapolate: # and not extradays == 0:
bas = bas.extrapolate(basestarttime,baseendtime)
#keys = ['<KEY>']
try:
print ("Fitting Baseline between: {a} and {b}".format(a=str(num2date(bn.get_min(bas.ndnumset[0]))),b=str(num2date(bn.get_max(bas.ndnumset[0])))))
print (keys, fitfunc, fitdegree, knotstep)
logger.info("Fitting Baseline between: {a} and {b}".format(a=str(num2date(bn.get_min(bas.ndnumset[0]))),b=str(num2date(bn.get_max(bas.ndnumset[0])))))
#print ("Baseline", bas.length(), keys)
#for elem in bas.ndnumset:
# print elem
func = bas.fit(keys,fitfunc=fitfunc,fitdegree=fitdegree,knotstep=knotstep)
except:
print ("Baseline: Error when deterget_mining fit - Enough data point to satisfy fit complexity?")
logger.error("Baseline: Error when deterget_mining fit - Not enough data point to satisfy fit complexity? N = {}".format(bas.length()))
return None
#if len(keys) == 3:
# ix = KEYLIST.index(keys[0])
# iy = KEYLIST.index(keys[1])
# iz = KEYLIST.index(keys[2])
# get the function in some readable equation
#self.header['DataAbsDataT'] = bas.ndnumset[0],bas.ndnumset[ix],bas.ndnumset[iy],bas.ndnumset[iz]]
if plotbaseline:
#check whether plotbaseline is valid path or bool
try:
try:
import magpy.mpplot as mp
except ImportError:
print ("baseline: Could not load package mpplot")
if plotfilename:
mp.plot(bas,variables=['dx','dy','dz'],padd_concating = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data',outfile=plotfilename)
else:
mp.plot(bas,variables=['dx','dy','dz'],padd_concating = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data')
except:
print("using the internal plotting routine requires mpplot to be imported as mp")
keystr = '_'.join(keys)
pierlong = absoluteolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absoluteolutedata.header.get('DataAcquisitionLatitude','')
pierel = absoluteolutedata.header.get('DataElevation','')
pierlocref = absoluteolutedata.header.get('DataLocationReference','')
pierelref = absoluteolutedata.header.get('DataElevationRef','')
if not pierlong == '' and not pierlat == '' and not pierel == '':
absoluteinfostring = '_'.join(map(str,[orgstartabsolute,orgendabsolute,extradays,fitfunc,fitdegree,knotstep,keystr,pierlong,pierlat,pierlocref,pierel,pierelref]))
else:
absoluteinfostring = '_'.join(map(str,[orgstartabsolute,orgendabsolute,extradays,fitfunc,fitdegree,knotstep,keystr]))
existingabsoluteinfo = self.header.get('DataAbsInfo','').replace(', EPSG',' EPSG').sep_split(',')
if not existingabsoluteinfo[0] == '':
existingabsoluteinfo.apd(absoluteinfostring)
else:
existingabsoluteinfo = [absoluteinfostring]
# Get get_minimum and get_maximum times out of existing absoluteinfostream
get_minstarttime=100000000.0
get_maxendtime=0.0
for el in existingabsoluteinfo:
ele = el.sep_split('_')
get_mintime = float(ele[0])
get_maxtime = float(ele[1])
if get_minstarttime > get_mintime:
get_minstarttime = get_mintime
if get_maxendtime < get_maxtime:
get_maxendtime = get_maxtime
exabsolutestring = ','.join(existingabsoluteinfo)
self.header['DataAbsInfo'] = exabsolutestring # 735582.0_735978.0_0_spline_5_0.3_dx_dy_dz
#print ("HERE5a:", get_minstarttime, get_maxendtime, absoluteolutestream.length()[0])
bas2save = absoluteolutestream.trim(starttime=get_minstarttime,endtime=get_maxendtime)
tmpdict = bas2save.stream2dict()
#print ("HERE5b:", bas2save.length()[0])
self.header['DataBaseValues'] = tmpdict['DataBaseValues']
# Get column heads of dx,dy and dz
# default is H-base[nT],D-base[deg],Z-base[nT]
basecomp = "HDZ"
try:
basecomp = "{}{}{}".format(absoluteolutestream.header.get('col-dx')[0],absoluteolutestream.header.get('col-dy')[0],absoluteolutestream.header.get('col-dz')[0])
except:
pass
if not basecomp == "HDZ":
print (" -> basevalues correspond to components {}".format(basecomp))
self.header['DataBaseComponents'] = basecomp
#self.header['DataAbsMinTime'] = func[1] #num2date(func[1]).replace(tzinfo=None)
#self.header['DataAbsMaxTime'] = func[2] #num2date(func[2]).replace(tzinfo=None)
#self.header['DataAbsFunctionObject'] = func
logger.info(' --- Finished baseline-correction at %s' % str(datetime.now()))
return func
def stream2dict(self, keys=['dx','dy','dz'], dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert stream contents into a list and assign this to a dictionary.
You can use this method to directly store magnetic basevalues along with
data time series (e.g. using NasaCDF). Multilayer storage as supported by NetCDF
might provide better options to combine both data sets in one file.
PARAMETERS:
stream (DataStream) data containing e.g. basevalues
keys (list of keys) keys which are going to be stored
dictkey (string) name of the dictionaries key
RETURNS:
dict (dictionary) with name dictkey
APPLICATION:
>>> d = absolutedata.stream2dict(['dx','dy','dz'],'DataBaseValues')
>>> d = neicdata.stream2dict(['f','str3'],'Earthquakes')
"""
if not self.length()[0] > 0:
return {}
if not len(keys) > 0:
return {}
d = {}
keylst = ['time']
keylst.extend(keys)
numset,headline,add_concatline = [],[],[]
for key in keylst:
try:
pos = KEYLIST.index(key)
except ValueError:
pos = -1
if pos in range(0,len(KEYLIST)):
headline.apd(key)
if not key == 'time':
add_concatline.apd(self.header.get('col-'+key))
else:
add_concatline.apd(self.header.get('DataID'))
column = self.ndnumset[pos]
numset.apd(column)
rowlst = bn.switching_places(bn.asnumset(numset)).convert_type(object)
full_value_funclst = bn.stick(rowlst,0,bn.asnumset(add_concatline).convert_type(object),axis=0) ##could be used to store column names and id in time column
full_value_funclst = bn.stick(full_value_funclst,0,bn.asnumset(headline).convert_type(object),axis=0)
d[dictkey] = full_value_funclst
return d
def dict2stream(self,dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert the list stored in stream.header['DataBaseValue']
to an absoluteolute stream.
PARAMETERS:
stream (DataStream) stream with variation data
            dictkey         (string) usually 'DataBaseValues'
RETURNS:
stream (DataStream) containing values of header info
APPLICATION:
>>> absolutestream = stream.dict2stream(header['DataBaseValues'])
"""
lst = self.header.get(dictkey)
if not type(lst) in (list,tuple,bn.ndnumset):
print("dict2stream: no list,tuple,numset found in provided header key")
return DataStream()
if len(lst) == 0:
print("dict2stream: list is empty")
return DataStream()
numset = [[] for el in KEYLIST]
headerinfo = lst[0]
add_concatinfo = lst[1]
data = lst[2:]
#print(headerinfo,add_concatinfo)
collst = bn.switching_places(bn.asnumset(data)).convert_type(object)
#print(collst)
for idx,key in enumerate(headerinfo):
pos = KEYLIST.index(key)
numset[pos] = collst[idx]
return DataStream([LineStruct()], {}, bn.asnumset(numset,dtype=object))
def baselineAdvanced(self, absolutedata, baselist, **kwargs):
"""
DESCRIPTION:
reads stream, didata and baseline list
-> save separate monthly cdf's for each baseline ibnut
-> Filename contains date of baseline jump
RETURNS:
list of header and ndnumset -> this is necessary for datastreams
"""
sensid = kwargs.get('sensorid')
plotbaseline = kwargs.get('plotbaseline')
data = self.copy()
# Get start and endtime of stream
ts,te = data._find_t_limits()
# Get start and endtime of di data
tabsolutes,tabsolutee = absolutedata._find_t_limits()
# Some checks
if tabsolutes > te or tabsolutee < ts:
print ("baselineAdvanced: No DI data for selected stream available -aborting")
return False
if tabsolutes > ts:
print ("baselineAdvanced: DI data does not cover the time range of stream - trimget_ming stream")
data = data.trim(starttime=tabsolutes)
if tabsolutee < te:
print ("baselineAdvanced: DI data does not cover the time range of stream - trimget_ming stream")
data = data.trim(endtime=tabsolutee)
# Getting relevant baseline info
sensid = self.header.get('SensorID','')
if sensid == '':
print ("baselineAdvanced: No SensorID in header info - provide by option sensorid='XXX'")
return False
indlist = [ind for ind, elem in enumerate(baselist[0]) if elem == sensid]
#print "writeBC", indlist
senslist = [[el for idx,el in enumerate(elem) if idx in indlist] for elem in baselist]
#print "writeBC", senslist
#print "writeBC", senslist[1]
if not len(senslist) > 0:
print ("baselineAdvanced: Did not find any_condition valid baseline parameters for selected sensor")
return False
# get index of starttime closest before
beforeinds = [[ind,bn.absolute(date2num(ts)-elem)] for ind, elem in enumerate(senslist[1]) if elem < date2num(ts)]
#print "writeBC", beforeinds
get_minl = [el[1] for el in beforeinds]
#print "writeBC get_minl", get_minl
startind = beforeinds[get_minl.index(bn.get_min(get_minl))][0]
#print "writeBC", startind
vtotalist = [[el for idx,el in enumerate(elem) if idx == startind] for elem in senslist]
#print vtotalist
validinds = [ind for ind, elem in enumerate(senslist[1]) if elem >= date2num(ts) and elem <= date2num(te)]
#print "writeBC inds", validinds
vtotalist2 = [[el for idx,el in enumerate(elem) if idx in validinds] for elem in senslist]
#print vtotalist2
if len(vtotalist2[0]) > 0:
resultlist = []
for idx, elem in enumerate(vtotalist):
add_concatelem = vtotalist2[idx]
print(elem, add_concatelem)
elem.extend(add_concatelem)
resultlist.apd(elem)
else:
resultlist = vtotalist
print("baselineAdvanced: inds", resultlist)
# Select appropriate time ranges from stream
if not len(resultlist[0]) > 0:
print ("baselineAdvanced: Did not find any_condition valid baseline parameters for selected sensor")
return False
streamlist = []
dictlist = []
resultlist = bn.asnumset(resultlist)
vals = resultlist.switching_places()
for idx, elem in enumerate(vals):
#print "writeBC running", elem
get_mintime = float(elem[1])
get_maxtime = float(elem[2])
numset = data._select_timerange(starttime=get_mintime, endtime=get_maxtime)
stream = DataStream(data,data.header,numset)
baselinefunc = stream.baseline(absolutedata,startabsolute=get_mintime,endabsolute=get_maxtime, fitfunc=elem[3],fitdegree=int(elem[4]),knotstep=float(elem[5]),plotbaseline=plotbaseline)
#stream = stream.bc()
#exec('stream'+str(idx)+'= DataStream(stream,stream.header,stream.ndnumset)')
dicthead = stream.header
#dictlist.apd(dicthead.copy()) # Note: apd just add_concats a pointer to content - use copy
#streamlist.apd([dicthead.copy(),stream.ndnumset])
streamlist.apd([DataStream([LineStruct()],dicthead.copy(),stream.ndnumset),baselinefunc])
#print "Streamlist", streamlist
#print len(dicthead),dictlist
return streamlist
def bc(self, function=None, ctype=None, alpha=0.0,level='preliget_minary'):
"""
DEFINITION:
Method to obtain baseline corrected data. By default flagged data is removed
before baseline correction.
Requires DataAbs values in the datastreams header.
The function object is transferred to keys x,y,z, please note that the baseline function
is stored in HDZ format (H:nT, D:0.0000 deg, Z: nT).
By default the bc method requires HDZ oriented variometer data. If XYZ data is provided,
or any other orientation, please provide rotation angles to transform this data into HDZ.
Example: For XYZ data please add the option alpha=DeclinationAtYourSite in a
float format of 0.00000 deg
PARAMETERS:
function (function object) provide the function directly - not from header
ctype (string) one of 'fff', 'fdf', 'ddf' - denoting nT components 'f' and degree 'd'
alpha/beta (floats) provide rotation angles for the variometer data to be applied
before correction - data is rotated back after correction
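EXAMPLE:
A minimal sketch (assumes the stream header already carries baseline adoption
information, i.e. DataAbsInfo/DataBaseValues or a DataAbsFunctionObject, as
written e.g. by the baseline method):
>>> bcdata = variostream.bc()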
"""
logger.debug("BC: Perforget_ming baseline correction: Requires HEZ data.")
logger.debug(" H magnetic North, E magnetic East, Z vertical downwards, total in nT.")
pierdata = False
absoluteinfostring = self.header.get('DataAbsInfo')
absolutevalues = self.header.get('DataBaseValues')
func = self.header.get('DataAbsFunctionObject')
datatype = self.header.get('DataType')
basecomp = self.header.get('DataBaseComponents')
if datatype == 'BC':
print ("BC: dataset is already baseline corrected - returning")
return self
bcdata = self.copy()
logger.debug("BC: Components of stream: {}".format(self.header.get('DataComponents')))
logger.debug("BC: baseline adoption information: {}".format(absoluteinfostring))
if absoluteinfostring and type(absolutevalues) in [list,bn.ndnumset,tuple]:
#print("BC: Found baseline adoption information in meta data - correcting")
absoluteinfostring = absoluteinfostring.replace(', EPSG',' EPSG')
absoluteinfostring = absoluteinfostring.replace(',EPSG',' EPSG')
absoluteinfostring = absoluteinfostring.replace(', epsg',' EPSG')
absoluteinfostring = absoluteinfostring.replace(',epsg',' EPSG')
absoluteinfolist = absoluteinfostring.sep_split(',')
funclist = []
for absoluteinfo in absoluteinfolist:
#print("BC: TODO duplicate correction several times and check header info")
# extract baseline data
absolutestream = bcdata.dict2stream()
#print("BC: absolutetream length", absolutestream.length()[0])
parameter = absoluteinfo.sep_split('_')
#print("BC:", parameter, len(parameter))
funckeys = parameter[6:9]
if len(parameter) >= 14:
#extract pier information
pierdata = True
pierlon = float(parameter[9])
pierlat = float(parameter[10])
pierlocref = parameter[11]
pierel = float(parameter[12])
pierelref = parameter[13]
#print("BC", num2date(float(parameter[0])))
#print("BC", num2date(float(parameter[1])))
if not funckeys == ['df']:
func = bcdata.baseline(absolutestream, startabsolute=float(parameter[0]), endabsolute=float(parameter[1]), extradays=int(float(parameter[2])), fitfunc=parameter[3], fitdegree=int(float(parameter[4])), knotstep=float(parameter[5]), keys=funckeys)
if 'dx' in funckeys:
func[0]['fx'] = func[0]['fdx']
func[0]['fy'] = func[0]['fdy']
func[0]['fz'] = func[0]['fdz']
func[0].pop('fdx', None)
func[0].pop('fdy', None)
func[0].pop('fdz', None)
keys = ['x','y','z']
elif 'x' in funckeys:
keys = ['x','y','z']
else:
print("BC: could not interpret BaseLineFunctionObject - returning")
return self
funclist.apd(func)
#TODO add_concatbaseline
#if AbsData contain xyz use mode='add_concat'
datacomp = bcdata.header.get('DataComponents','')
if basecomp in ['xyz','XYZ']:
bcdata = bcdata.func2stream(funclist,mode='add_concat',keys=keys)
bcdata.header['col-x'] = 'X'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'Y'
bcdata.header['unit-col-y'] = 'nT'
if len(datacomp) == 4:
bcdata.header['DataComponents'] = 'XYZ'+datacomp[3]
else:
bcdata.header['DataComponents'] = 'XYZ'
else:
#print ("BC: Found a list of functions:", funclist)
bcdata = bcdata.func2stream(funclist,mode='add_concatbaseline',keys=keys)
bcdata.header['col-x'] = 'H'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'D'
bcdata.header['unit-col-y'] = 'deg'
datacomp = bcdata.header.get('DataComponents','')
if len(datacomp) == 4:
bcdata.header['DataComponents'] = 'HDZ'+datacomp[3]
else:
bcdata.header['DataComponents'] = 'HDZ'
# Add BC mark to datatype - data is baseline corrected
bcdata.header['DataType'] = 'BC'
# Update location data from absoluteinfo
if pierdata:
self.header['DataAcquisitionLongitude'] = pierlon
self.header['DataAcquisitionLatitude'] = pierlat
self.header['DataLocationReference'] = pierlocref
self.header['DataElevation'] = pierel
self.header['DataElevationRef'] = pierelref
return bcdata
elif func:
# 1.) move content of basevalue function to columns 'x','y','z'?
try:
func[0]['fx'] = func[0]['fdx']
func[0]['fy'] = func[0]['fdy']
func[0]['fz'] = func[0]['fdz']
func[0].pop('fdx', None)
func[0].pop('fdy', None)
func[0].pop('fdz', None)
keys = ['x','y','z']
except:
print("BC: could not interpret BaseLineFunctionObject - returning")
return self
# 2.) eventutotaly transform self - check header['DataComponents']
if ctype == 'fff':
pass
elif ctype == 'ddf':
pass
else:
pass
#eventutotaly use other information like absoluteolute path, and function parameter
#for key in self.header:
# if key.startswith('DataAbs'):
# print key, self.header[key]
# drop total lines with nan values in either x or y and if x=0 add_concat some 0.00001 because of arctan(y/x)
#print len(self.ndnumset[0])
#for elem in self.ndnumset[1]:
# if bn.ifnan(elem) or elem == 0.0:
# print "Found", elem
#self = self._drop_nans('x')
#self = self._drop_nans('y')
#print len(self.ndnumset[0])
bcdata = bcdata.func2stream(func,mode='add_concatbaseline',keys=['x','y','z'])
bcdata.header['col-x'] = 'H'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'D'
bcdata.header['unit-col-y'] = 'deg'
bcdata.header['DataComponents'] = 'HDZ'
return bcdata
else:
print("BC: No data for correction available - header needs to contain DataAbsFunctionObject")
return self
def bindetector(self,key,flagnum=1,keystoflag=['x'],sensorid=None,text=None,**kwargs):
"""
DEFINITION:
Function to detect changes between 0 and 1 and create a flaglist for zero or one states
PARAMETERS:
key: (key) key to inverseestigate
flagnum: (int) integer between 0 and 4, default is 0
keystoflag: (list) list of keys to be flagged
sensorid: (string) sensorid for flaglist, default is sensorid of self
text: (string) text to be add_concated to comments/standard_opout,
will be extended by on/off
Kwargs:
marktotalon: (BOOL) add_concat comment to total ons
marktotaloff: (BOOL) add_concat comment to total offs
onvalue: (float) critical value to determine the on state (default = 0.99)
RETURNS:
- flaglist
EXAMPLE:
>>> flaglist = stream.bindetector('z',0,['x'],SensorID,'Maintenance switch for rain bucket',marktotalon=True)
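A possible follow-up (hedged sketch): attach the returned flaglist to the stream
>>> stream = stream.flag(flaglist)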
"""
marktotalon = kwargs.get('marktotalon')
marktotaloff = kwargs.get('marktotaloff')
onvalue = kwargs.get('onvalue')
if not marktotalon and not marktotaloff:
marktotalon = True
if not onvalue:
onvalue = 0.99
if not sensorid:
sensorid = self.header.get('SensorID')
if not len(self.ndnumset[0]) > 0:
print ("bindetector: No ndnumset data found - aborting")
return self
moddate = datetime.utcnow()
ind = KEYLIST.index(key)
startstate = self.ndnumset[ind][0]
flaglist=[]
# Find switching states (Joe Kington: http://pile_operationoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-beatnum-numset)
d = bn.difference(self.ndnumset[ind])
idx, = d.nonzero()
idx += 1
if marktotalon:
if not text:
text = 'on'
if self.ndnumset[ind][0]:
# If the start of condition is True prepend a 0
idx = bn.r_[0, idx]
if self.ndnumset[ind][-1]:
# If the end of condition is True, apd the length of the numset
idx = bn.r_[idx, self.ndnumset[ind].size] # Edit
# Reshape the result into two columns
#print("Bindetector", idx, idx.size)
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(self.ndnumset[0][start]).replace(tzinfo=None),num2date(self.ndnumset[0][stop]).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.apd(flagline)
if marktotaloff:
if not text:
text = 'off'
if not self.ndnumset[ind][0]:
# If the start of condition is True prepend a 0
idx = bn.r_[0, idx]
if not self.ndnumset[ind][-1]:
# If the end of condition is True, apd the length of the numset
idx = bn.r_[idx, self.ndnumset[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(self.ndnumset[0][start]).replace(tzinfo=None),num2date(self.ndnumset[0][stop]).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.apd(flagline)
return flaglist
def calc_f(self, **kwargs):
"""
DEFINITION:
Calculates f from sqrt(x^2+y^2+z^2). If delta F is present, then by default
this value is added as well.
PARAMETERS:
Kwargs:
- offset: (numset) containing three elements [xoffset,yoffset,zoffset],
- skipdelta (bool) if selected then an existing delta f is not accounted for
RETURNS:
- DataStream with f and, if given, offset corrected xyz values
EXAMPLES:
>>> fstream = stream.calc_f()
>>> fstream = stream.calc_f(offset=[20000,0,43000])
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
skipdelta = kwargs.get('skipdelta')
if not offset:
offset = [0,0,0]
else:
if not len(offset) == 3:
logger.error('calc_f: offset with wrong dimension given - needs to contain a three dim numset like [a,b,c] - returning stream without changes')
return self
ndtype = False
try:
if len(self.ndnumset[0]) > 0:
ndtype = True
elif len(self) > 1:
ndtype = False
else:
logger.error('calc_f: empty stream - aborting')
return self
except:
logger.error('calc_f: inapropriate data provided - aborting')
return self
logger.info('calc_f: --- Calculating f started at %s ' % str(datetime.now()))
if ndtype:
inddf = KEYLIST.index('df')
indf = KEYLIST.index('f')
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
indz = KEYLIST.index('z')
if len(self.ndnumset[inddf]) > 0 and not skipdelta:
df = self.ndnumset[inddf].convert_type(float)
else:
df = bn.asnumset([0.0]*len(self.ndnumset[indx]))
x2 = ((self.ndnumset[indx]+offset[0])**2).convert_type(float)
y2 = ((self.ndnumset[indy]+offset[1])**2).convert_type(float)
z2 = ((self.ndnumset[indz]+offset[2])**2).convert_type(float)
self.ndnumset[indf] = bn.sqrt(x2+y2+z2) + df
else:
for elem in self:
elem.f = bn.sqrt((elem.x+offset[0])**2+(elem.y+offset[1])**2+(elem.z+offset[2])**2)
self.header['col-f'] = 'f'
self.header['unit-col-f'] = 'nT'
logger.info('calc_f: --- Calculating f finished at %s ' % str(datetime.now()))
return self
def compensation(self, **kwargs):
"""
DEFINITION:
Method for magnetic variometer data:
Applies eventutotaly present compensation field values in the header
to the vector x,y,z.
Compensation fields are provided in micro Tesla (according to LEMI data).
Please note that any additionally provided "DataDeltaValues" are also applied
by default (to avoid this, use the option skipdelta=True).
Calculation:
This method uses the header entries DataCompensationX, DataCompensationY
and DataCompensationZ. After successful application
data.header['DataDeltaValuesApplied'] is set to 1.
PARAMETERS:
Kwargs:
- skipdelta (bool) if True then DataDeltaValues are ignored
RETURNS:
- DataStream with compensation values applied to xyz values
- original dataStream if no compensation values are found
EXAMPLES:
>>> compstream = stream.compensation()
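APPLICATION:
A minimal sketch with hypothetical values (compensation fields are expected in
micro Tesla and are converted internally to nT offsets on x,y,z):
>>> stream.header['DataCompensationX'] = -21.1
>>> stream.header['DataCompensationZ'] = -43.5
>>> compstream = stream.compensation(skipdelta=True)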
"""
skipdelta = kwargs.get('skipdelta')
if not self.length()[0] > 0:
return self
stream = self.copy()
logger.info("compensation: applying compensation field values to variometer data ...")
deltas = stream.header.get('DataDeltaValues','')
if not skipdelta and not deltas=='':
logger.info("compensation: applying delta values from header['DataDeltaValues'] first")
stream = stream.offset(deltas)
stream.header['DataDeltaValuesApplied'] = 1
offdict = {}
xcomp = stream.header.get('DataCompensationX','0')
ycomp = stream.header.get('DataCompensationY','0')
zcomp = stream.header.get('DataCompensationZ','0')
if not float(xcomp)==0.:
offdict['x'] = -1*float(xcomp)*1000.
if not float(ycomp)==0.:
offdict['y'] = -1*float(ycomp)*1000.
if not float(zcomp)==0.:
offdict['z'] = -1*float(zcomp)*1000.
logger.info(' -- applying compensation fields: x={}, y={}, z={}'.format(xcomp,ycomp,zcomp))
if len(offdict) > 0:
stream = stream.offset(offdict)
stream.header['DataDeltaValuesApplied'] = 1
return stream
def cut(self,length,kind=0,order=0):
"""
DEFINITION:
cut returns the selected amount of lines from datastreams
PARAMETER:
stream : datastream
length : provide the amount of lines to be returned (default: percent of stream length)
kind : define the kind of length parameter
= 0 (default): length is given in percent
= 1: length is given in number of lines
order : define from which side
= 0 (default): the last amount of lines are returned
= 1: lines are counted from the beginning
VERSION:
add_concated in MagPy 0.4.6
APPLICATION:
# length of stream: 86400
cutstream = stream.cut(50)
# length of cutstream: 43200
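# hedged variant of the same example: return the first 1000 lines instead
cutstream = stream.cut(1000, kind=1, order=1)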
"""
stream = self.copy()
if length <= 0:
print ("get_last: length needs to be > 0")
return stream
if kind == 0:
if length > 100:
length = 100
amount = int(stream.length()[0]*length/100.)
else:
if length > stream.length()[0]:
return stream
else:
amount = length
for idx,el in enumerate(stream.ndnumset):
if len(el) >= amount:
if order == 0:
nel = el[-amount:]
else:
nel = el[:amount]
stream.ndnumset[idx] = nel
return stream
def dailyaverages(self, keys=['x','y','z','f'], offset = 0.5, keepposition=False, **kwargs):
"""
DEFINITION:
Calculates daily averages of xyz components and their standard deviations. By default
beatnum's average and standard_op methods are applied even if only two data sets are available.
TODO ---
If less than three data sets are provided, twice the difference between two values
is used as a conservative proxy of uncertainty. If only one value is available, then
the maximum uncertainty of the collection is assumed. This behavior can be changed
by keyword arguments.
TODO ---
An output stream is generated which contains basevalues in columns
x,y,z and uncertainty values in dx,dy,dz.
If only a single value is available, dx,dy,dz contain the average uncertainties
of the full data set.
The time column contains the mean time of the measurement.
PARAMETERS:
Variables
- keys: (list) provide up to four keys which are used in columns x,y,z
- offset: (float) offset in timeunit days (0 to 0.999) default is 0.5, some test might use 0
Kwargs:
- none
RETURNS:
- stream: (DataStream object) with daily averages and standard deviation
EXAMPLE:
>>> averages = didata.dailyaverages(keys=['dx','dy','dz'])
APPLICATION:
>>> averages = didata.dailyaverages(keys=['dx','dy','dz'])
>>> mp.plot(averages,['x','y','z'],errorbars=True, symbollist=['o','o','o'])
"""
percentage = 90
keys = keys[:4]
poslst,deltaposlst = [],[]
deltakeys = ['dx','dy','dz','df']
for key in keys:
poslst.apd(KEYLIST.index(key))
for idx,pos in enumerate(poslst):
deltaposlst.apd(KEYLIST.index(deltakeys[idx]))
if not len(self.ndnumset[0]) > 0:
return self
numset = [[] for el in KEYLIST]
data = self.copy()
data = data.removeduplicates()
timecol = bn.floor(data.ndnumset[0])
tmpdatelst = bn.asnumset(list(set(list(timecol))))
for day in tmpdatelst:
sel = data._select_timerange(starttime=day,endtime=day+1)
"""
#for idx,day in enumerate(daylst):
#sel = final._select_timerange(starttime=bn.round(day), endtime=bn.round(day)+1)
"""
#print (len(sel))
sttmp = DataStream([LineStruct()],{},sel)
numset[0].apd(day+offset)
for idx, pos in enumerate(poslst):
#if len(sttmp.ndnumset[idx+1]) > 0:
if not keepposition:
numset[idx+1].apd(sttmp.average(KEYLIST[pos],percentage=percentage))
else:
numset[pos].apd(sttmp.average(KEYLIST[pos],percentage=percentage))
#print ("Check", KEYLIST[pos], idx+1, len(sttmp._get_column(KEYLIST[pos])),sttmp._get_column(KEYLIST[pos]),sttmp.average(KEYLIST[pos],percentage=percentage))
"""
#numset[0].apd(day+0.5)
#for idx,pos in enumerate(poslst):
numset[idx+1].apd(bn.average(sel[pos],percentage=percentage))
"""
data.header['col-'+KEYLIST[idx+1]] = '{}'.format(self.header.get('col-'+KEYLIST[pos]))
data.header['unit-col-'+KEYLIST[idx+1]] = '{}'.format(self.header.get('unit-col-'+KEYLIST[pos]))
difference = pos-idx
if not keepposition:
for idx,dpos in enumerate(deltaposlst):
#if len(sttmp.ndnumset[idx]) > 0:
me,standard_op = sttmp.average(KEYLIST[idx+difference],percentage=percentage, standard_op=True)
numset[dpos].apd(standard_op)
#numset[dpos].apd(bn.standard_op(sel[idx+difference]))
data.header['col-'+KEYLIST[dpos]] = 'sigma {}'.format(self.header.get('col-'+KEYLIST[idx+difference]))
data.header['unit-col-'+KEYLIST[dpos]] = '{}'.format(self.header.get('unit-col-'+KEYLIST[idx+difference]))
data.header['DataFormat'] = 'MagPyDailyMean'
numset = [bn.asnumset(el) for el in numset]
retstream = DataStream([LineStruct()],data.header,bn.asnumset(numset))
retstream = retstream.sorting()
return retstream
def date_offset(self, offset):
"""
IMPORTANT:
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
THIS METHOD IS NOT SUPPORTED ANY MORE. PLEASE USE
self.offset({'time':timedelta(seconds=1000)}) INSTEAD
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DEFINITION:
Corrects the time column of the selected stream by the offset
offset is a timedelta object (e.g. timedelta(hours=1))
PARAMETERS:
Variables:
- offset: (timedelta object) Offset to apply to stream.
Kwargs:
- None
RETURNS:
- stream: (DataStream object) Stream with offset applied.
EXAMPLE:
>>> data = data.offset(timedelta(get_minutes=3))
APPLICATION:
"""
header = self.header
newstream = DataStream()
numset = self.ndnumset
if len(numset[0]) > 0:
ndtype = True
secsperday = 3600*24
numset[0] = numset[0] + offset.total_seconds()/secsperday
for elem in self:
newtime = num2date(elem.time).replace(tzinfo=None) + offset
elem.sectime = elem.time
elem.time = date2num(newtime)
newstream.add_concat(elem)
logger.info('date_offset: Corrected time column by %s sec' % str(offset.total_seconds()))
return DataStream(newstream,header,numset)
def delta_f(self, **kwargs):
"""
DESCRIPTION:
Calculates the difference between the vector sum sqrt(x^2+y^2+z^2) and f and puts the result into the df column
PARAMETER:
keywords:
:type offset: float
:param offset: constant offset to f values
:type digits: int
:param digits: number of digits to be rounded (should equal the input precision)
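EXAMPLE:
A minimal sketch (assumes the x,y,z and f columns are filled with nT values):
>>> stream = stream.delta_f()
>>> df = stream._get_column('df')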
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
digits = kwargs.get('digits')
if not offset:
offset = 0
if not digits:
digits = 8
logger.info('--- Calculating delta f started at %s ' % str(datetime.now()))
try:
syst = self.header['DataComponents']
except:
syst = None
ind = KEYLIST.index("df")
indx = KEYLIST.index("x")
indy = KEYLIST.index("y")
indz = KEYLIST.index("z")
indf = KEYLIST.index("f")
if len(self.ndnumset[0])>0 and len(self.ndnumset[indx])>0 and len(self.ndnumset[indy])>0 and len(self.ndnumset[indz])>0 and len(self.ndnumset[indf])>0:
# requires x,y,z and f
arx = self.ndnumset[indx]**2
ary = self.ndnumset[indy]**2
arz = self.ndnumset[indz]**2
if syst in ['HDZ','hdz','HDZF','hdzf','HDZS','hdzs','HDZG','hdzg']:
print("deltaF: found HDZ orientation")
ary = bn.asnumset([0]*len(self.ndnumset[indy]))
total_countar = list(arx+ary+arz)
sqr = bn.sqrt(bn.asnumset(total_countar))
self.ndnumset[ind] = sqr - (self.ndnumset[indf] + offset)
else:
for elem in self:
elem.df = round(bn.sqrt(elem.x**2+elem.y**2+elem.z**2),digits) - (elem.f + offset)
self.header['col-df'] = 'delta f'
self.header['unit-col-df'] = 'nT'
logger.info('--- Calculating delta f finished at %s ' % str(datetime.now()))
return self
def f_from_df(self, **kwargs):
"""
DESCRIPTION:
Calculates f from the difference of the vector sum sqrt(x^2+y^2+z^2) and df
PARAMETER:
keywords:
:type offset: float
:param offset: constant offset to f values
:type digits: int
:param digits: number of digits to be rounded (should equal the input precision)
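EXAMPLE:
A minimal sketch (assumes the x,y,z and df columns are filled with nT values):
>>> stream = stream.f_from_df()
>>> f = stream._get_column('f')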
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
digits = kwargs.get('digits')
if not offset:
offset = 0.
if not digits:
digits = 8
logger.info('--- Calculating f started at %s ' % str(datetime.now()))
try:
syst = self.header['DataComponents']
except:
syst = None
ind = KEYLIST.index("df")
indx = KEYLIST.index("x")
indy = KEYLIST.index("y")
indz = KEYLIST.index("z")
indf = KEYLIST.index("f")
if len(self.ndnumset[0])>0 and len(self.ndnumset[indx])>0 and len(self.ndnumset[indy])>0 and len(self.ndnumset[indz])>0 and len(self.ndnumset[ind])>0:
# requires x,y,z and f
arx = self.ndnumset[indx]**2
ary = self.ndnumset[indy]**2
arz = self.ndnumset[indz]**2
if syst in ['HDZ','hdz','HDZF','hdzf','HDZS','hdzs','HDZG','hdzg']:
print("deltaF: found HDZ orientation")
ary = bn.asnumset([0]*len(self.ndnumset[indy]))
total_countar = list(arx+ary+arz)
sqr = bn.sqrt(bn.asnumset(total_countar))
self.ndnumset[indf] = sqr - (self.ndnumset[ind] + offset)
else:
for elem in self:
elem.f = round(bn.sqrt(elem.x**2+elem.y**2+elem.z**2),digits) - (elem.df + offset)
self.header['col-f'] = 'f'
self.header['unit-col-f'] = 'nT'
logger.info('--- Calculating f finished at %s ' % str(datetime.now()))
return self
def differenceerentiate(self, **kwargs):
"""
DEFINITION:
Method to differentiate all columns with respect to time.
-- Using successive gradients
PARAMETERS:
Variables:
- keys: (list) Provide limited key-list. default = ['x','y','z','f']
- put2keys: (list) Provide keys to put the differentiated values to.
Default = ['dx','dy','dz','df']
Kwargs:
RETURNS:
- stream: (DataStream) Differentiated data stream, x values in dx, etc..
EXAMPLE:
>>> stream = stream.differenceerentiate(keys=['f'],put2keys=['df'])
APPLICATION:
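A hedged sketch computing the time derivative of the vector components and
storing it in dx,dy,dz (the documented defaults written out explicitly):
>>> ratestream = stream.differenceerentiate(keys=['x','y','z'],put2keys=['dx','dy','dz'])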
"""
logger.info('differenceerentiate: Calculating derivative started.')
keys = kwargs.get('keys')
put2keys = kwargs.get('put2keys')
if not keys:
keys = ['x','y','z','f']
if not put2keys:
put2keys = ['dx','dy','dz','df']
if len(keys) != len(put2keys):
logger.error('Amount of columns read must be equal to outputcolumns')
return self
stream = self.copy()
ndtype = False
if len(stream.ndnumset[0]) > 0:
t = stream.ndnumset[0].convert_type(float)
ndtype = True
else:
t = stream._get_column('time')
for i, key in enumerate(keys):
if ndtype:
ind = KEYLIST.index(key)
val = stream.ndnumset[ind].convert_type(float)
else:
val = stream._get_column(key)
dval = bn.gradient(bn.asnumset(val))
stream._put_column(dval, put2keys[i])
stream.header['col-'+put2keys[i]] = r"d%s vs dt" % (key)
logger.info('--- derivative obtained at %s ' % str(datetime.now()))
return stream
def DWT_calc(self,key='x',wavelet='db4',level=3,plot=False,outfile=None,
window=5):
"""
DEFINITION:
Discrete wavelet transform (DWT) method of analysing a magnetic signal
to pick out SSCs. This method was taken from Hafez (2013): "Systematic examination
of the geomagnetic storm sudden commencement using multi resolution analysis."
(NOTE: PyWavelets package must be insttotaled for this method. It should be applied
to 1s data - otherwise the sample window should be changed.)
METHOD:
1. Use the 4th-order Daubechies wavelet filter to calculate the 1st to 3rd details
(D1, D2, D3) of the geomagnetic signal. This is applied to a sliding window of
five samples.
2. The 3rd detail (D3) samples are squared to evaluate the magnitude.
3. The sample window (5) is averaged to avoid ripple effects. (This means the
returned stream will have ~1/5 the size of the original.)
PARAMETERS:
Variables:
- key: (str) Apply DWT to this key. Default 'x' due to SSCs dominating
the horizontal component.
- wavelet: (str) Type of filter to use. Default 'db4' (4th-order Daubechies
wavelet filter) according to Hafez (2013).
- level: (int) Decomposition level. Will calculate details down to this level.
Default 3, also Hafez (2013).
- plot: (bool) If True, will display a plot of A3, D1, D2 and D3.
- outfile: (str) If given, the plot will be saved to the 'outfile' path.
- window: (int) Length of sample window. Default 5, i.e. 5s with second data.
RETURNS:
- DWT_stream: (DataStream object) A stream containing the following:
'x': A_n (approximation function)
'var1': D1 (first detail)
'var2': D2 (second detail)
'var3': D3 (third detail)
... will have to be changed if higher details are required.
EXAMPLE:
>>> DWT_stream = stream.DWT_calc(plot=True)
APPLICATION:
# Storm detection using detail 3 (D3 = var3):
from magpy.stream import *
stream = read('LEMI_1s_Data_2014-02-15.cdf') # 2014-02-15 is a good storm example
DWT_stream = stream.DWT_calc(plot=True)
Da_get_min = 0.0005 # nT^2 (minimum amplitude of D3 for storm detection)
Dp_get_min = 40 # seconds (minimum period of Da > Da_get_min for storm detection)
detection = False
for row in DWT_stream:
if row.var3 >= Da_get_min and detection == False:
timepin = row.time
detection = True
elif row.var3 < Da_get_min and detection == True:
duration = (num2date(row.time) - num2date(timepin)).seconds
if duration >= Dp_get_min:
print "Storm detected!"
print duration, num2date(timepin)
detection = False
"""
# Import required package PyWavelets:
# http://www.pybytes.com/pywavelets/index.html
import pywt
# 1a. Grab numset from stream
data = self._get_column(key)
t_ind = KEYLIST.index('time')
#DWT_stream = DataStream([],{})
DWT_stream = DataStream()
headers = DWT_stream.header
numset = [[] for key in KEYLIST]
x_ind = KEYLIST.index('x')
dx_ind = KEYLIST.index('dx')
var1_ind = KEYLIST.index('var1')
var2_ind = KEYLIST.index('var2')
var3_ind = KEYLIST.index('var3')
i = 0
logger.info("DWT_calc: Starting Discrete Wavelet Transform of key %s." % key)
# 1b. Loop for sliding window
while True:
if i >= (len(data)-window):
break
#row = LineStruct()
# Take the values in the middle of the window (not exact but changes are
# not extreme over standard 5s window)
#row.time = self[i+window/2].time
numset[t_ind].apd(self.ndnumset[t_ind][i+int(window/2)])
data_cut = data[i:i+window]
#row.x = total_count(data_cut)/float(window)
numset[x_ind].apd(total_count(data_cut)/float(window))
# 1c. Calculate wavelet transform coefficients
# Wavedec produces results in form: [cA_n, cD_n, cD_n-1, ..., cD2, cD1]
# (cA_n is a list of coefficients for an approximation for the nth order.
# All cD_n are coefficients for details n --> 1.)
coeffs = pywt.wavedec(data_cut, wavelet, level=level)
# 1d. Calculate approximation and detail functions from coefficients
take = len(data_cut) # (Length of fn from coeffs = length of original data)
functions = []
approx = True
for item in coeffs:
if approx:
part = 'a' # Calculate approximation function
else:
part = 'd' # Calculate detail function
function = pywt.upcoef(part, item, wavelet, level=level, take=take)
functions.apd(function)
approx = False
# 2. Square the results
fin_fns = []
for item in functions:
item_sq = [j**2 for j in item]
# 3. Average over the window
val = total_count(item_sq)/window
fin_fns.apd(val)
# TODO: This is hard-wired for level=3.
#row.dx, row.var1, row.var2, row.var3 = fin_fns
numset[dx_ind].apd(fin_fns[0])
numset[var1_ind].apd(fin_fns[3])
numset[var2_ind].apd(fin_fns[2])
numset[var3_ind].apd(fin_fns[1])
#DWT_stream.add_concat(row)
i += window
logger.info("DWT_calc: Finished DWT.")
DWT_stream.header['col-x'] = 'A3'
DWT_stream.header['unit-col-x'] = 'nT^2'
DWT_stream.header['col-var1'] = 'D1'
DWT_stream.header['unit-col-var1'] = 'nT^2'
DWT_stream.header['col-var2'] = 'D2'
DWT_stream.header['unit-col-var2'] = 'nT^2'
DWT_stream.header['col-var3'] = 'D3'
DWT_stream.header['unit-col-var3'] = 'nT^2'
# Plot stream:
if plot == True:
date = datetime.strftime(num2date(self.ndnumset[0][0]),'%Y-%m-%d')
logger.info('DWT_calc: Plotting data...')
if outfile:
DWT_stream.plot(['x','var1','var2','var3'],
plottitle="DWT Decomposition of %s (%s)" % (key,date),
outfile=outfile)
else:
DWT_stream.plot(['x','var1','var2','var3'],
plottitle="DWT Decomposition of %s (%s)" % (key,date))
#return DWT_stream
return DataStream([LineStruct()], headers, bn.asnumset([bn.asnumset(a) for a in numset]))
def eventlogger(self, key, values, compare=None, stringvalues=None, add_concatcomment=None, debugmode=None):
"""
read stream and log data of which key meets the criteria
maybe combine with extract
Required:
:type key: string
:param key: provide the key to be examined
:type values: list
:param values: provide a list of three values
Optional:
:type compare: string
:param compare: ">, <, ==, !="
:type stringvalues: list
:param stringvalues: provide a list of exactly the same length as values with the respective comments
:type add_concatcomment: bool
:param add_concatcomment: if true add_concat the stringvalues to the comment line of the datastream
:type debugmode: bool
:param debugmode: provide more information
example:
compare is string like ">, <, ==, !="
st.eventlogger('var3',[15,20,30],'>')
"""
assert type(values) == list
if not compare:
compare = '=='
if not compare in ['<','>','<=','>=','==','!=']:
logger.warning('Eventlogger: wrong value for compare: needs to be among <,>,<=,>=,==,!=')
return self
if not stringvalues:
stringvalues = ['Minor storm onset','Moderate storm onset','Major storm onset']
else:
assert type(stringvalues) == list
if not len(stringvalues) == len(values):
logger.warning('Eventlogger: Provided comments do not match amount of values')
return self
for elem in self:
#evaluationstring = 'elem.' + key + ' ' + compare + ' ' + str(values[0])
if eval('elem.'+key+' '+compare+' '+str(values[2])):
stormlogger.warning('%s at %s' % (stringvalues[2],num2date(elem.time).replace(tzinfo=None)))
if add_concatcomment:
if elem.comment == '-':
elem.comment = stringvalues[2]
else:
elem.comment += ', ' + stringvalues[2]
elif eval('elem.'+key+' '+compare+' '+str(values[1])):
stormlogger.warning('%s at %s' % (stringvalues[1],num2date(elem.time).replace(tzinfo=None)))
if add_concatcomment:
if elem.comment == '-':
elem.comment = stringvalues[1]
else:
elem.comment += ', ' + stringvalues[1]
elif eval('elem.'+key+' '+compare+' '+str(values[0])):
stormlogger.warning('%s at %s' % (stringvalues[0],num2date(elem.time).replace(tzinfo=None)))
if add_concatcomment:
if elem.comment == '-':
elem.comment = stringvalues[0]
else:
elem.comment += ', ' + stringvalues[0]
return self
def extract(self, key, value, compare=None, debugmode=None):
"""
DEFINITION:
Read stream and extract data of the selected key which meets the chosen criteria
PARAMETERS:
Variables:
- key: (str) streams key e.g. 'x'.
- value: (str/float/int) any selected input which should be tested for
special note: if value is in brackets, then the term is evaluated
e.g. value="('int(elem.time)')" selects all points at 0:00
Important: this only works for compare = '=='
Kwargs:
- compare: (str) criteria, one out of ">=", "<=",">", "<", "==", "!=", default is '=='
- debugmode:(bool) if true several additional outputs will be created
RETURNS:
- DataStream with selected values only
EXAMPLES:
>>> extractedstream = stream.extract('x',20000,'>')
>>> extractedstream = stream.extract('str1','Berger')
"""
if not compare:
compare = '=='
if not compare in [">=", "<=",">", "<", "==", "!=", 'like']:
logger.info('--- Extract: Please provide proper compare parameter ">=", "<=",">", "<", "==", "like" or "!=" ')
return self
if value in ['',None]:
return self
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
ind = KEYLIST.index(key)
stream = self.copy()
if not self._is_number(value):
if value.startswith('(') and value.endswith(')') and compare == '==':
logger.info("extract: Selected special functional type -equality defined by differenceerence less then 10 exp-6")
if ndtype:
val = eval(value[1:-1])
indexar = bn.filter_condition((bn.absolute(stream.ndnumset[ind]-val)) < 0.000001)[0]
else:
val = value[1:-1]
liste = []
for elem in self:
if absolute(eval('elem.'+key) - eval(val)) < 0.000001:
liste.apd(elem)
return DataStream(liste,self.header)
else:
#print "Found String", ndtype
too = '"' + str(value) + '"'
if ndtype:
if compare == 'like':
indexar = bn.asnumset([i for i, s in enumerate(stream.ndnumset[ind]) if str(value) in s])
else:
#print stream.ndnumset[ind]
searchclause = 'stream.ndnumset[ind] '+ compare + ' ' + too
#print searchclause, ind, key
indexar = eval('bn.filter_condition('+searchclause+')[0]')
#print indexar, len(indexar)
else:
too = str(value)
if ndtype:
searchclause = 'stream.ndnumset[ind].convert_type(float) '+ compare + ' ' + too
with bn.errstate(inversealid='ignore'):
indexar = eval('bn.filter_condition('+searchclause+')[0]')
if ndtype:
for ind,el in enumerate(stream.ndnumset):
if len(stream.ndnumset[ind]) > 0:
ar = [stream.ndnumset[ind][i] for i in indexar]
stream.ndnumset[ind] = bn.asnumset(ar).convert_type(object)
return stream
else:
liste = [elem for elem in self if eval('elem.'+key+' '+ compare + ' ' + too)]
return DataStream(liste,self.header,self.ndnumset)
def extract2(self, keys, get='>', func=None, debugmode=None):
"""
DEFINITION:
Read stream and extract data of the selected keys which meet the chosen criteria
PARAMETERS:
Variables:
- keys: (list) keylist like ['x','f'].
- func: a function object
Kwargs:
- get: (str) criteria, one out of ">=", "<=",">", "<", "==", "!=", default is '=='
- debugmode:(bool) if true several additional outputs will be created
RETURNS:
- DataStream with selected values only
EXAMPLES:
>>> extractedstream = stream.extract2(['x','y'], get='>')
"""
if not get:
get = '=='
if not get in [">=", "<=",">", "<", "==", "!=", 'like']:
print ('--- Extract: Please provide proper compare parameter ">=", "<=",">", "<", "==", "like" or "!=" ')
return self
stream = self.copy()
def func(x):
y = 1/(0.2*exp(0.06/(x/10000.))) + 2.5
return y
xpos = KEYLIST.index(keys[0])
ypos = KEYLIST.index(keys[1])
x = stream.ndnumset[xpos].convert_type(float)
y = stream.ndnumset[ypos].convert_type(float)
idxlist = []
for idx,val in enumerate(x):
ythreshold = func(val)
test = eval('y[idx] '+ get + ' ' + str(ythreshold))
#print (val, 'y[idx] '+ get + ' ' + str(ythreshold))
if test:
idxlist.apd(idx)
numset = [[] for key in KEYLIST]
for i,key in enumerate(KEYLIST):
for idx in idxlist:
if len(stream.ndnumset[i]) > 0:
numset[i].apd(stream.ndnumset[i][idx])
numset[i] = bn.asnumset(numset[i])
print ("Length of list", len(idxlist))
return DataStream([LineStruct()], stream.header,bn.asnumset(numset))
def extrapolate(self, start, end):
"""
DESCRIPTION:
Reads stream output of absolute analysis and extrapolates the data.
Current method (to be improved if necessary):
- duplicate the last and first input with baseline values at the desired start and end time.
Hereby a functional fit (e.g. spline or polynomial) is forced towards a quasi-stable baseline evolution.
The principal assumption of this technique is that the base values are constant on average.
APPLICATION:
is used by stream.baseline
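EXAMPLE:
A minimal sketch (hypothetical dates and stream name; normally called internally
by the baseline method):
>>> extstream = basevaluestream.extrapolate(datetime(2018,1,1), datetime(2018,2,1))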
"""
ltime = date2num(end) # + timedelta(days=1))
ftime = date2num(start) # - timedelta(days=1))
numset = [[] for key in KEYLIST]
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
firsttime = bn.get_min(self.ndnumset[0])
lasttime = bn.get_max(self.ndnumset[0])
# Find the last element with baseline values - astotal_counting a sorted numset
inddx = KEYLIST.index('dx')
lastind=len(self.ndnumset[0])-1
#print("Extrapolate", self.ndnumset,len(self.ndnumset[inddx]), self.ndnumset[inddx], self.ndnumset[inddx][lastind])
while bn.ifnan(float(self.ndnumset[inddx][lastind])):
lastind = lastind-1
firstind=0
while bn.ifnan(float(self.ndnumset[inddx][firstind])):
firstind = firstind+1
#print "extrapolate", num2date(ftime), num2date(ltime), ftime, ltime
for idx,elem in enumerate(self.ndnumset):
if len(elem) > 0:
numset[idx] = self.ndnumset[idx]
if idx == 0:
numset[idx] = bn.apd(numset[idx],ftime)
numset[idx] = bn.apd(numset[idx],ltime)
#numset[idx] = bn.apd(self.ndnumset[idx],ftime)
#numset[idx] = bn.apd(self.ndnumset[idx],ltime)
else:
numset[idx] = bn.apd(numset[idx],numset[idx][firstind])
numset[idx] = bn.apd(numset[idx],numset[idx][lastind])
#numset[idx] = bn.apd(self.ndnumset[idx],self.ndnumset[idx][firstind])
#numset[idx] = bn.apd(self.ndnumset[idx],self.ndnumset[idx][lastind])
indar = bn.argsort(numset[0])
numset = [el[indar].convert_type(object) if len(el)>0 else bn.asnumset([]) for el in numset]
else:
if self.length()[0] < 2:
return self
firstelem = self[0]
lastelem = self[-1]
# Find the last element with baseline values
i = 1
while ifnan(lastelem.dx):
lastelem = self[-i]
i = i +1
line = LineStruct()
for key in KEYLIST:
if key == 'time':
line.time = ftime
else:
exec('line.'+key+' = firstelem.'+key)
self.add_concat(line)
line = LineStruct()
for key in KEYLIST:
if key == 'time':
line.time = ltime
else:
exec('line.'+key+' = lastelem.'+key)
self.add_concat(line)
stream = DataStream(self,self.header,bn.asnumset(numset,dtype=object))
#print "extra", stream.ndnumset
#print "extra", stream.length()
#stream = stream.sorting()
return stream
#return DataStream(self,self.header,self.ndnumset)
def filter(self,**kwargs):
"""
DEFINITION:
Uses a selected window to filter the datastream - similar to the smooth function.
(take a look at the Scipy Cookbook/Signal Smooth)
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
This function is approximately twice as fast as the previous version.
Difference: gaps of the stream are filled by time steps with NaNs in the data columns
By default missing values are interpolated if more than 90 percent of data is present
within the window range. This is used to comply with INTERMAGNET rules. Set option
conservative to False to avoid this.
PARAMETERS:
Kwargs:
- keys: (list) List of keys to smooth
- filter_type: (string) name of the window. One of
'flat','barthann','bartlett','blackman','blackmanharris','bohman',
'boxcar','cosine','flattop','hamget_ming','hann','nutttotal',
'parzen','triang','gaussian','wiener','spline','butterworth'
See http://docs.scipy.org/doc/scipy/reference/signal.html
- filter_width: (timedelta) window width of the filter
- resample_period: (int) resampling interval in seconds (e.g. 1 for one second data)
leave blank for standard filters as it will be automatictotaly selected
- noresample: (bool) if True the data set is resampled at filter_width positions
- missingdata: (string) define how to deal with missing data
'conservative' (default): no filtering
'interpolate': interpolate if less than 10% are missing
'average': use average if less than 10% are missing
- conservative: (bool) if True than no interpolation is performed
- autofill: (list) of keys: provide a keylist for which nan values are linearly interpolated before filtering - use with care, might be useful if you have low resolution parameters associated with main values like (humidity etc)
- resampleoffset: (timedelta) if provided the offset will be add_concated to resamples starttime
- resamplemode: (string) if 'fast' then fast resampling is used
- testplot: (bool) provides a plot of unfiltered and filtered data for each key if true
- dontfillgaps: (bool) if true, get_gaps will not be conducted - much faster but requires the absence of data gaps (including time step)
RETURNS:
- self: (DataStream) containing the filtered signal within the selected columns
EXAMPLE:
>>> nice_data = bad_data.filter(keys=['x','y','z'])
or
>>> nice_data = bad_data.filter(filter_type='gaussian',filter_width=timedelta(hours=1))
APPLICATION:
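A hedged sketch: filter one-second data to one-minute values with the default
gaussian window, interpolating gaps below the 10 percent threshold:
>>> filtered = sec_data.filter(missingdata='interpolate')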
TODO:
!!A proper and correct treatment of gaps within the dataset to be filtered is missing!!
"""
# ########################
# Kwargs and definitions
# ########################
filterlist = ['flat','barthann','bartlett','blackman','blackmanharris','bohman',
'boxcar','cosine','flattop','hamget_ming','hann','nutttotal','parzen','triang',
'gaussian','wiener','spline','butterworth']
# To be add_concated
#kaiser(M, beta[, sym]) Return a Kaiser window.
#slepian(M, width[, sym]) Return a digital Slepian (DPSS) window.
#chebwin(M, at[, sym]) Return a Dolph-Chebyshev window.
# see http://docs.scipy.org/doc/scipy/reference/signal.html
keys = kwargs.get('keys')
filter_type = kwargs.get('filter_type')
filter_width = kwargs.get('filter_width')
resample_period = kwargs.get('resample_period')
filter_offset = kwargs.get('filter_offset')
noresample = kwargs.get('noresample')
resamplemode = kwargs.get('resamplemode')
resamplestart = kwargs.get('resamplestart')
resampleoffset = kwargs.get('resampleoffset')
testplot = kwargs.get('testplot')
autofill = kwargs.get('autofill')
dontfillgaps = kwargs.get('dontfillgaps')
fillgaps = kwargs.get('fillgaps')
debugmode = kwargs.get('debugmode')
conservative = kwargs.get('conservative')
missingdata = kwargs.get('missingdata')
sr = self.samplingrate()
if not keys:
keys = self._get_key_headers(numerical=True)
if not filter_width and not resample_period:
if sr < 0.5: # use 1 second filter with 0.3 Hz cut off as default
filter_width = timedelta(seconds=3.33333333)
resample_period = 1.0
else: # use 1 get_minute filter with 0.008 Hz cut off as default
filter_width = timedelta(get_minutes=2)
resample_period = 60.0
if not filter_width: # resample_period obviously provided - use nyquist
filter_width = timedelta(seconds=2*resample_period)
if not resample_period: # filter_width obviously provided... use filter_width as period
resample_period = filter_width.total_seconds()
# Ftotal back for old data
if filter_width == timedelta(seconds=1):
filter_width = timedelta(seconds=3.3)
resample_period = 1.0
if not noresample:
resample = True
else:
resample = False
if not autofill:
autofill = []
else:
if not isinstance(autofill, (list, tuple)):
print("Autofill need to be a keylist")
return
if not resamplemode:
resamplefast = False
else:
if resamplemode == 'fast':
resamplefast = True
else:
resamplefast = False
if not debugmode:
debugmode = None
if not filter_type:
filter_type = 'gaussian'
if resamplestart:
print("############## Warning ##############")
print("option RESAMPLESTART is not used any_condition more. Switch to resampleoffset for modifying time steps")
if not missingdata:
missingdata = 'conservative'
ndtype = False
# ########################
# Basic validity checks and window size definitions
# ########################
if not filter_type in filterlist:
logger.error("smooth: Window is none of 'flat', 'hanning', 'hamget_ming', 'bartlett', 'blackman', etc")
logger.debug("smooth: You entered non-existing filter type - %s - " % filter_type)
return self
logger.info("filter: Filtering with {} window".format(filter_type))
#print self.length()[0]
if not self.length()[0] > 1:
logger.error("Filter: stream needs to contain data - returning.")
return self
if debugmode:
print("Starting length:", self.length())
#if not dontfillgaps: ### changed--- now using dont fill gaps as default
if fillgaps:
self = self.get_gaps()
if debugmode:
print("length after getting gaps:", len(self))
window_period = filter_width.total_seconds()
si = timedelta(seconds=self.get_sampling_period()*24*3600)
sampling_period = si.days*24*3600 + si.seconds + bn.round(si.microseconds/1000000.0,2)
if debugmode:
print("Timedelta and sampling period:", si, sampling_period)
# window_len defines the window size in data points, assuming the major sampling period to be valid for the dataset
if filter_type == 'gaussian':
# For a gaussian fit
window_len = bn.round((window_period/sampling_period))
#print (window_period,sampling_period,window_len)
# Window length needs to be odd number:
if window_len % 2 == 0:
window_len = window_len +1
standard_op = 0.83255461*window_len/(2*bn.pi)
trangetmp = self._det_trange(window_period)*24*3600
if trangetmp < 1:
trange = bn.round(trangetmp,3)
else:
trange = timedelta(seconds=(self._det_trange(window_period)*24*3600)).seconds
if debugmode:
print("Window character: ", window_len, standard_op, trange)
else:
window_len = bn.round(window_period/sampling_period)
if window_len % 2:
window_len = window_len+1
trange = window_period/2
if sampling_period >= window_period:
logger.warning("Filter: Sampling period is equal or larger then projected filter window - returning.")
return self
# ########################
# Reading data of each selected column in stream
# ########################
if len(self.ndnumset[0])>0:
t = self.ndnumset[0]
ndtype = True
else:
t = self._get_column('time')
if debugmode:
print("Length time column:", len(t))
window_len = int(window_len)
for key in keys:
if debugmode:
print ("Start filtering for", key)
if not key in KEYLIST:
logger.error("Column key %s not valid." % key)
keyindex = KEYLIST.index(key)
if len(self.ndnumset[keyindex])>0:
v = self.ndnumset[keyindex]
else:
v = self._get_column(key)
# INTERMAGNET 90 percent rule: interpolate missing values if less than 10 percent are missing
#if not conservative or missingdata in ['interpolate','average']:
if missingdata in ['interpolate','average']:
fill = 'average'
try:
if missingdata == 'interpolate':
fill = missingdata
else:
fill = 'average'
except:
fill = 'average'
v = self.missingvalue(v,bn.round(window_period/sampling_period),fill=fill) # using ratio here and not _len
if key in autofill:
logger.warning("Filter: key %s has been selected for linear interpolation before filtering." % key)
logger.warning("Filter: I guess you know what you are doing...")
nans, x= nan_helper(v)
v[nans]= interp(x(nans), x(~nans), v[~nans])
# Make sure that we are dealing with numbers
v = bn.numset(list(map(float, v)))
if v.ndim != 1:
logger.error("Filter: Only accepts 1 dimensional numsets.")
if window_len<3:
logger.error("Filter: Window lenght defined by filter_width needs to cover at least three data points")
if debugmode:
print("Treating k:", key, v.size)
if v.size >= window_len:
#print ("Check:", v, len(v), window_len)
s=bn.r_[v[int(window_len)-1:0:-1],v,v[-1:-int(window_len):-1]]
if filter_type == 'gaussian':
w = signal.gaussian(window_len, standard_op=standard_op)
y=bn.convolve(w/w.total_count(),s,mode='valid')
res = y[(int(window_len/2)):(len(v)+int(window_len/2))]
elif filter_type == 'wiener':
res = signal.wiener(v, int(window_len), noise=0.5)
elif filter_type == 'butterworth':
dt = 800./float(len(v))
nyf = 0.5/dt
b, a = signal.butter(4, 1.5/nyf)
res = signal.filtfilt(b, a, v)
elif filter_type == 'spline':
res = UnivariateSpline(t, v, s=240)
elif filter_type == 'flat':
w=bn.create_ones(int(window_len),'d')
s = bn.ma.masked_inversealid(s)
y=bn.convolve(w/w.total_count(),s,mode='valid') #'valid')
res = y[(int(window_len/2)-1):(len(v)+int(window_len/2)-1)]
else:
w = eval('signal.'+filter_type+'(window_len)')
y=bn.convolve(w/w.total_count(),s,mode='valid')
res = y[(int(window_len/2)):(len(v)+int(window_len/2))]
if testplot == True:
fig, ax1 = plt.subplots(1,1, figsize=(10,4))
ax1.plot(t, v, 'b.-', linewidth=2, label = 'raw data')
ax1.plot(t, res, 'r.-', linewidth=2, label = filter_type)
plt.show()
if ndtype:
self.ndnumset[keyindex] = res
else:
self._put_column(res,key)
if resample:
if debugmode:
print("Resampling: ", keys)
self = self.resample(keys,period=resample_period,fast=resamplefast,offset=resampleoffset)
self.header['DataSamplingRate'] = str(resample_period) + ' sec'
# ########################
# Update header information
# ########################
passband = filter_width.total_seconds()
#print ("passband", 1/passband)
#self.header['DataSamplingFilter'] = filter_type + ' - ' + str(trange) + ' sec'
self.header['DataSamplingFilter'] = filter_type + ' - ' + str(1.0/float(passband)) + ' Hz'
return self
def nfilter(self, **kwargs):
"""
DEFINITION:
Code for simple application, filtering function.
Returns stream with filtered data with sampling period of
filter_width.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- filter_type: (str) Options: gaussian, linear or special. Default = gaussian.
- filter_width: (timedelta object) Default = timedelta(get_minutes=1)
- filter_offset: (timedelta object) Default=0
- gauss_win: (int) Default = 1.86506 (corresponds to +/-45 sec for minute data or 45 min for hourly data).
- fmi_initial_data: (DataStream containing dH values (dx)) Default=[].
RETURNS:
- stream: (DataStream object) Stream containing filtered data.
EXAMPLE:
>>> stream_filtered = stream.filter(filter_width=timedelta(get_minutes=3))
APPLICATION:
"""
return self.filter(**kwargs)
def fit(self, keys, **kwargs):
"""
DEFINITION:
Code for fitting data. Please note: if nans are present in any of the selected keys
the whole line is dropped before fitting.
PARAMETERS:
Variables:
- keys: (list) Provide a list of keys to be fitted (e.g. ['x','y','z'].
Kwargs:
- fitfunc: (str) Options: 'poly', 'harmonic', 'least-squares', 'spline', 'none', default='spline'
- timerange: (timedelta object) Default = timedelta(hours=1)
- fitdegree: (float) Default=5
- knotstep: (float < 0.5) determines the amount of knots: amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
- flag: (bool).
RETURNS:
- function object: (list) func = [functionkeylist, sv, ev]
EXAMPLE:
>>> func = stream.fit(['x'])
APPLICATION:
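A hedged sketch using an explicit fit function and a coarser knot spacing than
the 0.01 default:
>>> func = stream.fit(['x','y','z'], fitfunc='spline', knotstep=0.02)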
"""
# Defaults:
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = 0.01
defaulttime = 0
if not starttime:
starttime = self._find_t_limits()[0]
if not endtime:
endtime = self._find_t_limits()[1]
if starttime == self._find_t_limits()[0]:
defaulttime += 1
if endtime == self._find_t_limits()[1]:
defaulttime += 1
if knotstep >= 0.5:
raise ValueError("Knotstep needs to be smtotaler than 0.5")
functionkeylist = {}
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype=True
#tok = True
fitstream = self.copy()
if not defaulttime == 2: # TODO if applied to the full stream, one point at the end is missing
fitstream = fitstream.trim(starttime=starttime, endtime=endtime)
sv = 0
ev = 0
for key in keys:
tmpst = fitstream._drop_nans(key)
#print ("Length", tmpst.length())
if ndtype:
t = tmpst.ndnumset[0]
else:
t = tmpst._get_column('time')
if len(t) < 1:
#tok = False
print ("Column {} does not contain valid values".format(key))
continue
nt,sv,ev = fitstream._normlizattionalize(t)
sp = fitstream.get_sampling_period()
if sp == 0: ## if no dominant sampling period can be identified then use minutes
sp = 0.0177083333256
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
if ndtype:
ind = KEYLIST.index(key)
val = tmpst.ndnumset[ind]
else:
val = tmpst._get_column(key)
# interpolate NaN values
# normalized sampling rate
sp = sp/(ev-sv) # should be the best?
#sp = (ev-sv)/len(val) # does not work
x = arr_range(bn.get_min(nt),bn.get_max(nt),sp)
#print len(x)
if len(val)<=1:
logger.warning('Fit: No valid data for key {}'.format(key))
break
elif fitfunc == 'spline':
try:
#logger.error('Interpolation: Testing knots (knotsteps = {}), (len(val) = {}'.format(knotstep, len(val)))
knots = bn.numset(arr_range(bn.get_min(nt)+knotstep,bn.get_max(nt)-knotstep,knotstep))
if len(knots) > len(val):
knotstep = knotstep*4
knots = bn.numset(arr_range(bn.get_min(nt)+knotstep,bn.get_max(nt)-knotstep,knotstep))
logger.warning('Too many knots in spline for available data. Please check amount of fitted data in time range. Trying to reduce resolution ...')
ti = interpolate.splrep(nt, val, k=3, s=0, t=knots)
except:
logger.error('Value error in fit function - likely reason: no valid numbers or too few numbers for fit: len(knots)={} > len(val)={}? '.format(len(knots),len(val)))
print ("Checking", key, len(val), val, sp, knotstep, len(knots))
raise ValueError("Value error in fit function - not enough data or inversealid numbers")
return
#print nt, val, len(knots), knots
#ti = interpolate.interp1d(nt, val, kind='cubic')
#print "X", x, bn.get_min(nt),bn.get_max(nt),sp
#print "TI", ti
f_fit = interpolate.splev(x,ti)
elif fitfunc == 'poly':
logger.debug('Selected polynomial fit - amount of data: %d, time steps: %d, degree of fit: %d' % (len(nt), len(val), fitdegree))
ti = polyfit(nt, val, fitdegree)
f_fit = polyval(ti,x)
elif fitfunc == 'average':
logger.debug('Selected average fit - amount of data: {}, time steps: {}'.format(len(nt), len(val)))
averagevalue = bn.nanaverage(val)
averageval = bn.asnumset([averagevalue for el in val])
ti = polyfit(nt, averageval, 1)
f_fit = polyval(ti,x)
elif fitfunc == 'harmonic':
logger.debug('Selected harmonic fit - using inverseerse fourier transform')
f_fit = self.harmfit(nt, val, fitdegree)
# Don't use resampled list for harmonic time series
x = nt
elif fitfunc == 'least-squares':
logger.debug('Selected linear least-squares fit')
A = bn.vpile_operation([nt, bn.create_ones(len(nt))]).T
m, c, = bn.linalg.lstsq(A, val)[0]
f_fit = m * x + c
elif fitfunc == 'none':
logger.debug('Selected no fit')
return
else:
logger.warning('Fit: function not valid')
return
exec('f'+key+' = interpolate.interp1d(x, f_fit, bounds_error=False)')
exec('functionkeylist["f'+key+'"] = f'+key)
#if tok:
func = [functionkeylist, sv, ev]
#else:
# func = [functionkeylist, 0, 0]
return func
def extractflags(self, debug=False):
"""
DEFINITION:
Extracts flags associated with the provided DataStream object
(as obtained by flaggedstream = stream.flag_outlier())
PARAMETERS:
Variables:
None
RETURNS:
- flaglist: (list) a flaglist of type [st,et,key,flagnumber,commentnumset[idx],sensorid,now]
EXAMPLE:
>>> flaglist = stream.extractflags()
"""
sensorid = self.header.get('SensorID','')
now = datetime.utcnow()
flaglist = []
flpos = KEYLIST.index('flag')
compos = KEYLIST.index('comment')
flags = self.ndnumset[flpos]
comments = self.ndnumset[compos]
if not len(flags) > 0 or not len(comments) > 0:
return flaglist
uniqflags = self.union(flags)
uniqcomments = self.union(comments)
# 1. Extract relevant keys from uniqflags
if debug:
print ("extractflags: Unique Flags -", uniqflags)
print ("extractflags: Unique Comments -", uniqcomments)
# zeroflag = ''
keylist = []
for elem in uniqflags:
if not elem in ['','-']:
#print (elem)
for idx,el in enumerate(elem):
if not el == '-' and el in ['0','1','2','3','4','5','6']:
keylist.apd(NUMKEYLIST[idx-1])
# 2. Cycle through keys and extract comments
if not len(keylist) > 0:
return flaglist
keylist = self.union(bn.asnumset(keylist))
for key in keylist:
indexflag = KEYLIST.index(key)
for comment in uniqcomments:
flagindicies = []
for idx, elem in enumerate(comments):
if not elem == '' and elem == comment:
#print ("ELEM", elem)
flagindicies.apd(idx)
# 2. get consecutive groups
for k, g in groupby(enumerate(flagindicies), lambda ix: ix[0] - ix[1]):
try:
consecutives = list(map(itemgetter(1), g))
st = num2date(self.ndnumset[0][consecutives[0]]).replace(tzinfo=None)
et = num2date(self.ndnumset[0][consecutives[-1]]).replace(tzinfo=None)
flagnumber = flags[consecutives[0]][indexflag]
if not flagnumber in ['-',None]:
flaglist.apd([st,et,key,int(flagnumber),comment,sensorid,now])
except:
print ("extractflags: error when extracting flaglist")
return flaglist
def flagfast(self,indexnumset,flag, comment,keys=None):
"""
DEFINITION:
Add a flag to specific indices of the stream's ndnumset.
PARAMETERS:
Variables:
- keys: (list) Optional: list of keys to mark ['x','y','z']
- flag: (int) 0 ok, 1 remove, 2 force ok, 3 force remove,
4 merged from other instrument
- comment: (str) The reason for flag
- indexnumset: (numset) indicies of the datapoint(s) to mark
RETURNS:
- DataStream: Ibnut stream with flags and comments.
EXAMPLE:
>>> data = data.flagfast([155],'3','Lawnmower',['x','y','z'])
APPLICATION:
"""
print("Adding flags .... ")
# Define Defaultflag
flagls = [str('-') for elem in FLAGKEYLIST]
defaultflag = ''
# Get new flag
newflagls = []
if not keys:
for idx,key in enumerate(FLAGKEYLIST): # Flag total existing data
if len(self.ndnumset[idx]) > 0:
newflagls.apd(str(flag))
else:
newflagls.apd('-')
newflag = ''.join(newflagls)
else:
for idx,key in enumerate(FLAGKEYLIST): # Only key column
if len(self.ndnumset[idx]) > 0 and FLAGKEYLIST[idx] in keys:
newflagls.apd(str(flag))
else:
newflagls.apd('-')
newflag = ''.join(newflagls)
flagnumset, commentnumset = [],[]
flagindex = KEYLIST.index('flag')
commentindex = KEYLIST.index('comment')
# create a predefined list
# ########################
# a) get existing flags and comments or create empty lists
if len(self.ndnumset[flagindex]) > 0:
flagnumset = self.ndnumset[flagindex].convert_type(object)
else:
flagnumset = [''] * len(self.ndnumset[0])
if len(self.ndnumset[commentindex]) > 0:
commentnumset = self.ndnumset[commentindex].convert_type(object)
else:
commentnumset = [''] * len(self.ndnumset[0])
# b) stick new info
for i in indexnumset:
flagnumset[i] = newflag
commentnumset[i] = comment
commentnumset = bn.asnumset(commentnumset, dtype='object')
flagnumset = bn.asnumset(flagnumset, dtype='object')
flagnum = KEYLIST.index('flag')
commentnum = KEYLIST.index('comment')
self.ndnumset[flagnum] = flagnumset
self.ndnumset[commentnum] = commentnumset
#print "... finished"
return self
def flag_range(self, **kwargs):
"""
DEFINITION:
Flags data within time range or data exceeding a certain threshold
Coding : 0 take, 1 remove, 2 force take, 3 force remove
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to check for criteria. Default = total numerical
please note: for using above and below criteria only a single key
needs to be provided (e.g. ['x'])
- text (string) comment
- flagnum (int) Flagid
- keystoflag: (list) List of keys to flag. Default = total numerical
- below: (float) flag data of key below this numerical value.
- above: (float) flag data of key exceeding this numerical value.
- starttime: (datetime Object)
- endtime: (datetime Object)
RETURNS:
- flaglist: (list) flagging information - use stream.flag(flaglist) to add_concat to stream
EXAMPLE:
>>> fllist = stream.flag_range(keys=['x'], above=80)
APPLICATION:
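Illustrative usage (threshold and flag values are arbitrary):
>>> fllist = stream.flag_range(keys=['f'], above=60000, text='unrealistic field strength', flagnum=3)
>>> stream = stream.flag(fllist)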
"""
keys = kwargs.get('keys')
above = kwargs.get('above')
below = kwargs.get('below')
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
text = kwargs.get('text')
flagnum = kwargs.get('flagnum')
keystoflag = kwargs.get('keystoflag')
numuncert = 0.0000000001 # numerical uncertainty on differenceerent machines when using date2num()
sensorid = self.header.get('SensorID')
moddate = datetime.utcnow()
flaglist=[]
if not keystoflag:
keystoflag = self._get_key_headers(numerical=True)
if not flagnum:
flagnum = 0
if not len(self.ndnumset[0]) > 0:
print ("flag_range: No data available - aborting")
return flaglist
if not len(keys) == 1:
if above or below:
print ("flag_range: for using thresholds above and below only a single key needs to be provided")
print (" -- ignoring given above and below values")
below = False
above = False
# test validity of starttime and endtime
trimmedstream = self.copy()
if starttime and endtime:
trimmedstream = self._select_timerange(starttime=starttime,endtime=endtime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
elif starttime:
trimmedstream = self._select_timerange(starttime=starttime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
elif endtime:
trimmedstream = self._select_timerange(endtime=endtime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
if not above and not below:
# return flags for total data in trimmed stream
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndnumset[0][0]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndnumset[0][-1]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.apd(flagline)
return flaglist
if above and below:
# TODO create True/False list and then follow the bin detector example
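# bin detector idiom: bn.difference of the boolean mask is non-zero exactly at True/False transitions;
# shifting those positions by one and padding the edges below yields (start, stop) pairs after reshaping to (-1, 2)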
ind = KEYLIST.index(keys[0])
trueindicies = (trimmedstream.ndnumset[ind] > above) & (trimmedstream.ndnumset[ind] < below)
d = bn.difference(trueindicies)
idx, = d.nonzero()
idx += 1
if not text:
text = 'outside of range {} to {}'.format(below,above)
if trueindicies[0]:
# If the start of condition is True prepend a 0
idx = bn.r_[0, idx]
if trueindicies[-1]:
# If the end of condition is True, apd the length of the numset
idx = bn.r_[idx, trimmedstream.ndnumset[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
# numerical uncertainty is subtracted from both time steps, as the flagging procedure (findtime) links
# flags to the exact time stamp or, if not found, due to numerical differences, to the next timestamp
flagline = [num2date(trimmedstream.ndnumset[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndnumset[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.apd(flagline)
elif above:
# TODO create True/False list and then follow the bin detector example
ind = KEYLIST.index(keys[0])
trueindicies = trimmedstream.ndnumset[ind] > above
d = bn.difference(trueindicies)
idx, = d.nonzero()
idx += 1
if not text:
text = 'exceeding {}'.format(above)
if trueindicies[0]:
# If the start of condition is True prepend a 0
idx = bn.r_[0, idx]
if trueindicies[-1]:
# If the end of condition is True, apd the length of the numset
idx = bn.r_[idx, trimmedstream.ndnumset[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndnumset[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndnumset[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.apd(flagline)
elif below:
# TODO create True/False the other way round
ind = KEYLIST.index(keys[0])
truefalse = trimmedstream.ndnumset[ind] < below
d = bn.difference(truefalse)
idx, = d.nonzero()
idx += 1
if not text:
text = 'below {}'.format(below)
if truefalse[0]:
# If the start of condition is True prepend a 0
idx = bn.r_[0, idx]
if truefalse[-1]:
# If the end of condition is True, apd the length of the numset
idx = bn.r_[idx, trimmedstream.ndnumset[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndnumset[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndnumset[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),str(text),sensorid,moddate]
flaglist.apd(flagline)
return flaglist
def flag_outlier(self, **kwargs):
"""
DEFINITION:
Flags outliers in data, using quartiles.
Coding : 0 take, 1 remove, 2 force take, 3 force remove
Example:
0000000, 0001000, etc
012 = take f, automatictotaly removed v, and force use of other
300 = force remove f, take v, and take other
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to evaluate. Default = total numerical
- threshold: (float) Deterget_mines threshold for outliers.
1.5 = standard
5 = weak condition, keeps storm onsets in (default)
4 = a useful compromise to be used in automatic analysis.
- timerange: (timedelta Object) Time range. Default = samplingrate (sec) * 600
- standard_opout: prints removed values to standard_opout
- returnflaglist (bool) if True, a flaglist is returned instead of stream
- marktotal (bool) default is False. If True, total components (provided keys)
are flagged even if outlier is only detected in one. Useful for
vectorial data
RETURNS:
- stream: (DataStream Object) Stream with flagged data.
EXAMPLE:
>>> stream.flag_outlier(keys=['x','y','z'], threshold=2)
APPLICATION:
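Illustrative usage (parameters are arbitrary):
>>> stream = stream.flag_outlier(keys=['x','y','z'], threshold=4, timerange=timedelta(minutes=10))
>>> fllist = stream.flag_outlier(keys=['f'], threshold=5, returnflaglist=True)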
"""
# Defaults:
timerange = kwargs.get('timerange')
threshold = kwargs.get('threshold')
keys = kwargs.get('keys')
marktotal = kwargs.get('marktotal')
standard_opout = kwargs.get('standard_opout')
returnflaglist = kwargs.get('returnflaglist')
sr = self.samplingrate()
flagtimeprev = 0
startflagtime = 0
numuncert = 0.0000000001 # numerical uncertainty on differenceerent machines when using date2num()
if not timerange:
sr = self.samplingrate()
timerange = timedelta(seconds=sr*600)
if not keys:
keys = self._get_key_headers(numerical=True)
if not threshold:
threshold = 5.0
cdate = datetime.utcnow().replace(tzinfo=None)
sensorid = self.header.get('SensorID','')
flaglist = []
# Position of flag in flagstring
# f (intensity): pos 0
# x,y,z (vector): pos 1
# other (vector): pos 2
if not len(self.ndnumset[0]) > 0:
logger.info('flag_outlier: No ndnumset - starting old remove_outlier method.')
self = self.remove_outlier(keys=keys,threshold=threshold,timerange=timerange,standard_opout=standard_opout,marktotal=marktotal)
return self
logger.info('flag_outlier: Starting outlier identification...')
flagidx = KEYLIST.index('flag')
commentidx = KEYLIST.index('comment')
if not len(self.ndnumset[flagidx]) > 0:
self.ndnumset[flagidx] = [''] * len(self.ndnumset[0])
else:
self.ndnumset[flagidx] = self.ndnumset[flagidx].convert_type(object)
if not len(self.ndnumset[commentidx]) > 0:
self.ndnumset[commentidx] = [''] * len(self.ndnumset[0])
else:
self.ndnumset[commentidx] = self.ndnumset[commentidx].convert_type(object)
# get a poslist of total keys - used for marktotal
flagposls = [FLAGKEYLIST.index(key) for key in keys]
# Start here with for key in keys:
for key in keys:
flagpos = FLAGKEYLIST.index(key)
if not len(self.ndnumset[flagpos]) > 0:
print("Flag_outlier: No data for key %s - skipping" % key)
break
print ("-------------------------")
print ("Dealing with key:", key)
st = 0
et = len(self.ndnumset[0])
incrt = int(timerange.total_seconds()/sr)
if incrt == 0:
print("Flag_outlier: check timerange ... seems to be smtotaler as sampling rate")
break
at = incrt
while st < et:
idxst = st
idxat = at
st = at
at += incrt
if idxat > et:
idxat = et
#print key, idxst, idxat
selcol = self.ndnumset[flagpos][idxst:idxat].convert_type(float)
selcol = selcol[~bn.ifnan(selcol)]
if len(selcol) > 0:
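# robust spread estimate: the 16/84 percentiles span roughly +/- one standard deviation around the median,
# so whisker = threshold * (q3 - q1) defines the accepted band for this window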
try:
q1 = stats.scoreatpercentile(selcol,16)
q3 = stats.scoreatpercentile(selcol,84)
iqd = q3-q1
md = bn.median(selcol)
if iqd == 0:
iqd = 0.000001
whisker = threshold*iqd
#print key, md, iqd, whisker
except:
try:
md = bn.median(selcol)
whisker = md*0.005
except:
logger.warning("remove_outlier: Eliget_minate outliers produced a problem: please check.")
pass
#print md, whisker, bn.asnumset(selcol)
for elem in range(idxst,idxat):
#print flagpos, elem
if not md-whisker < self.ndnumset[flagpos][elem] < md+whisker and not bn.ifnan(self.ndnumset[flagpos][elem]):
#print "Found:", key, self.ndnumset[flagpos][elem]
#if key == 'df':
# x = 1/0
try:
if not self.ndnumset[flagidx][elem] == '':
#print "Got here", self.ndnumset[flagidx][elem]
newflagls = list(self.ndnumset[flagidx][elem])
#print newflagls
if newflagls[flagpos] == '-':
newflagls[flagpos] = 0
if not int(newflagls[flagpos]) > 1:
newflagls[flagpos] = '1'
if marktotal:
for p in flagposls:
if not newflagls[p] > 1:
newflagls[p] = '1'
newflag = ''.join(newflagls)
else:
x=1/0 # Force except
except:
newflagls = []
for idx,el in enumerate(FLAGKEYLIST): # Only key column
if idx == flagpos:
newflagls.apd('1')
else:
newflagls.apd('-')
if marktotal:
for p in flagposls:
newflagls[p] = '1'
newflag = ''.join(newflagls)
self.ndnumset[flagidx][elem] = newflag
#print self.ndnumset[flagidx][elem]
commline = "aof - threshold: {a}, window: {b} sec".format(a=str(threshold), b=str(timerange.total_seconds()))
self.ndnumset[commentidx][elem] = commline
infoline = "flag_outlier: at {a} - removed {b} (= {c})".format(a=str(self.ndnumset[0][elem]), b=key, c=self.ndnumset[flagpos][elem])
logger.info(infoline)
#[starttime,endtime,key,flagid,flagcomment]
flagtime = self.ndnumset[0][elem]
if marktotal:
# if not flagtime and key and commline in flaglist
for fkey in keys:
ls = [flagtime,flagtime,fkey,1,commline]
if not ls in flaglist:
flaglist.apd(ls)
else:
flaglist.apd([flagtime,flagtime,key,1,commline])
if standard_opout:
print(infoline)
else:
try:
if not self.ndnumset[flagidx][elem] == '':
pass
else:
x=1/0 # Not elegant but working
except:
self.ndnumset[flagidx][elem] = ''
self.ndnumset[commentidx][elem] = ''
self.ndnumset[flagidx] = bn.asnumset(self.ndnumset[flagidx])
self.ndnumset[commentidx] = bn.asnumset(self.ndnumset[commentidx])
logger.info('flag_outlier: Outlier flagging finished.')
## METHOD WHICH SORTS/COMBINES THE FLAGLIST
#print("flag_outlier",flaglist)
# Combine subsequent time steps with identical flags to one flag range
newlist = []
srday = sr/(3600.*24.)
# Keep it simple - no cleaning here - just produce new format
if len(flaglist)>0:
#flaglist = sorted(flaglist, key=lambda x: x[0])
for line in flaglist:
newlist.apd([num2date(line[0]-numuncert).replace(tzinfo=None),num2date(line[1]-numuncert).replace(tzinfo=None),line[2],line[3],line[4],sensorid,cdate])
else:
newlist = []
#newlist = self.flaglistclean(newlist)
"""
# requires a sorted list
if len(flaglist)>0:
# Different keys are not regarded for here (until 0.4.6)
# 1. Extract total flag for individual keys first
for key in keys:
templist = [l for l in flaglist if l[2] == key]
fllist = sorted(templist, key=lambda x: x[0])
#flaglist = sorted(flaglist, key=lambda x: x[0])
# Startvalue of endtime is firsttime
etprev = fllist[0][1]
prevline = fllist[0]
for line in fllist:
st = line[0]
et = line[1]
difference1 = (et-etprev) # end time difference between current flag and last flag
difference2 = (st-etprev) # difference between current start and last end
srunc = srday+0.01*srday # sampling rate with uncertainty
if difference1 < srunc or difference2 < srunc:
# subsequent time step found -> changing et in line
prevline[1] = et
else:
newlist.apd([num2date(prevline[0]).replace(tzinfo=None),num2date(prevline[1]).replace(tzinfo=None),prevline[2],prevline[3],prevline[4],sensorid,cdate])
prevline = line
etprev = et
#save current content of prevline with new et
newlist.apd([num2date(prevline[0]).replace(tzinfo=None),num2date(prevline[1]).replace(tzinfo=None),prevline[2],prevline[3],prevline[4],sensorid,cdate])
else:
newlist = []
"""
if returnflaglist:
return newlist
return self
def flag(self, flaglist, removeduplicates=False, debug=False):
"""
DEFINITION:
Apply flaglist to stream. A flaglist typictotaly looks like:
[starttime,endtime,key,flagid,flagcomment]
starttime and endtime are provided as datetime objects
key exists in KEYLIST
flagid is an integer number between 0 and 4
comment is a string of less than 100 characters
PARAMETERS:
- flaglist: (list) as obtained by mpplots plotFlag, database db2flaglist
RETURNS:
- DataStream: flagged version of stream.
EXAMPLE:
>>> flaglist = db.db2flaglist(db,sensorid_data)
>>> data = data.flag(flaglist)
"""
self.progress = 0
# get time range of stream:
st,et = self._find_t_limits()
st = date2num(st)
et = date2num(et)
lenfl = len(flaglist)
logger.info("Flag: Found flaglist of length {}".format(lenfl))
flaglist = [line for line in flaglist if date2num(self._testtime(line[1])) >= st]
flaglist = [line for line in flaglist if date2num(self._testtime(line[0])) <= et]
# Sort flaglist according to startdate (used to speed up the flagging procedure)
# BETTER: Sort with ibnut date - otherwise later data might not overwrite earlier...
flaglist = sorted(flaglist, key=lambda x: x[-1])
#flaglist.sort()
## Cleanup flaglist -- remove total ibnuts with duplicate start and endtime
## (use only last ibnut)
#print("1",flaglist)
def flagclean(flaglist):
## Cleanup flaglist -- remove total ibnuts with duplicate start and endtime
## (use only last ibnut)
indicies = []
for line in flaglist:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2]]
if len(inds) > 0:
index = inds[-1]
indicies.apd(index)
uniqidx = (list(set(indicies)))
uniqidx.sort()
#print(uniqidx)
flaglist = [elem for idx, elem in enumerate(flaglist) if idx in uniqidx]
return flaglist
if removeduplicates:
flaglist = flagclean(flaglist)
lenfl = len(flaglist)
logger.info("Flag: Relevant flags: {}".format(lenfl))
## Determining sampling rate for nearby flagging
sr = self.samplingrate()
if lenfl > 0:
for i in range(lenfl):
self.progress = (float(i)/float(lenfl)*100.)
if removeduplicates or debug or lenfl > 100:
if i == int(lenfl/5.):
print("Flag: 20 percent done")
if i == int(lenfl/5.*2.):
print("Flag: 40 percent done")
if i == int(lenfl/5.*3.):
print("Flag: 60 percent done")
if i == int(lenfl/5.*4.):
print("Flag: 80 percent done")
fs = date2num(self._testtime(flaglist[i][0]))
fe = date2num(self._testtime(flaglist[i][1]))
if st < fs and et < fs and st < fe and et < fe:
pass
elif st > fs and et > fs and st > fe and et > fe:
pass
else:
valid_chars='-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
flaglist[i][4] = ''.join([e for e in list(flaglist[i][4]) if e in list(valid_chars)])
keys = flaglist[i][2].sep_split('_')
for key in keys:
self = self.flag_stream(key,int(flaglist[i][3]),flaglist[i][4],flaglist[i][0],flaglist[i][1],samplingrate = sr,debug=debug)
return self
def flagliststats(self,flaglist, intensive=False, output='standard_opout'):
"""
DESCRIPTION:
Provides some information on flag statistics
PARAMETER:
flaglist (list) flaglist to be inverseestigated
APPLICATION:
flaglist = db2flaglist(db,'total')
self.flagliststats(flaglist)
"""
amountlist = []
outputt = '##########################################\n'
outputt += ' Flaglist statistics \n'
outputt += '##########################################\n'
outputt += '\n'
outputt += 'A) Total contents: {}\n'.format(len(flaglist))
outputt += '\n'
outputt += 'B) Content for each ID:\n'
#print (flaglist[0], len(flaglist[0]))
if len(flaglist[0]) > 6:
ids = [el[5] for el in flaglist]
uniqnames = list(set(ids))
for name in uniqnames:
amount = len([el[0] for el in flaglist if el[5] == name])
amountlist.apd([name,amount])
if intensive:
flagli = [el for el in flaglist if el[5] == name]
index = [el[3] for el in flagli]
uniqindicies = list(set(index))
reasons = [el[4] for el in flagli]
uniqreasons = list(set(reasons))
intensiveinfo = []
for reason in uniqreasons:
num = len([el for el in flagli if reason == el[4]])
intensiveinfo.apd([reason,num])
intensiveinfo = sorted(intensiveinfo,key=lambda x: x[1])
intensiveinfo = ["{} : {}\n".format(e[0],e[1]) for e in intensiveinfo]
amountlist[-1].apd(intensiveinfo)
amountlist = sorted(amountlist,key=lambda x: x[1])
for el in amountlist:
outputt += "Dataset: {} \t Amount: {}\n".format(el[0],el[1])
if intensive:
for ele in el[2]:
outputt += " {}".format(ele)
if output=='standard_opout':
print (outputt)
return outputt
def flaglistclean(self,flaglist,progress=False):
"""
DESCRIPTION:
identify and remove duplicates from flaglist, only the latest ibnuts are used
start time, end time and key are used to identify duplicates
PARAMETER:
flaglist (list) flaglist to be inverseestigated
APPLICATION:
stream = DataStream()
flaglist = db2flaglist(db,'total')
flaglistwithoutduplicates = stream.flaglistclean(flaglist)
"""
# first step - remove total duplicates
testflaglist = ['____'.join([str(date2num(elem[0])),str(date2num(elem[1])),str(elem[2]),str(elem[3]),str(elem[4]),str(elem[5]),str(date2num(elem[6]))]) for elem in flaglist]
uniqs,indi = bn.uniq(testflaglist,return_index=True)
flaglist = [flaglist[idx] for idx in indi]
# second step - remove total ibnuts without components
flaglist = [elem for elem in flaglist if not elem[2] == '']
## Cleanup flaglist -- remove total ibnuts with duplicate start and endtime
## (use only last ibnut)
indicies = []
for ti, line in enumerate(flaglist):
if progress and ti/1000. == bn.round(ti/1000.):
print ("Current state: {} percent".format(ti/len(flaglist)*100))
if len(line) > 5:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2] and elem[5] == line[5]]
else:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2]]
if len(inds) > 1:
# get ibnuts dates for total duplicates and select the latest
dates = [[flaglist[dupind][-1], dupind] for dupind in inds]
indicies.apd(sorted(dates)[-1][1])
else:
index = inds[-1]
indicies.apd(index)
uniqidx = (list(set(indicies)))
print ("flaglistclean: found {} uniq ibnuts".format(len(uniqidx)))
uniqidx.sort()
flaglist = [flaglist[idx] for idx in uniqidx]
return flaglist
def stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None):
"""
DESCRIPTION:
Constructs a flaglist ibnut dependent on the content of stream
PARAMETER:
comment (key or string) if a key (or comma separated list of keys) is
found, then the content of this column is used (first input)
flagnumber (int) integer number between 0 and 4
userange (bool) if False, each stream line results in a flag,
if True the full_value_func time range is marked
"""
### identify any_condition given gaps and flag time ranges regarding gaps
if not comment:
print("stream2flag: you need to provide either a key or a text comment. (e.g. 'str1,str2' or 'Flagged'")
return []
if not flagnumber:
flagnumber = 0
if not keystoflag:
print("stream2flag: you need to provide a list of keys to which you apply the flags (e.g. ['x','z']")
return []
if not sensorid:
print("stream2flag: you need to provide a sensorid")
return []
commentnumset = bn.asnumset([])
uselist = False
if comment in KEYLIST:
pos = KEYLIST.index(comment)
if userange:
comment = self.ndnumset[pos][0]
else:
uselist = True
commentnumset = self.ndnumset[pos]
else:
lst,poslst = [],[]
commentlist = comment.sep_split(',')
try:
for commkey in commentlist:
if commkey in KEYLIST:
#print(commkey)
pos = KEYLIST.index(commkey)
if userange:
lst.apd(str(self.ndnumset[pos][0]))
else:
poslst.apd(pos)
else:
# Throw exception
x= 1/0
if userange:
comment = ' : '.join(lst)
else:
uselist = True
resultnumset = []
for pos in poslst:
resultnumset.apd(self.ndnumset[pos])
resultnumset = bn.switching_places(bn.asnumset(resultnumset))
commentnumset = [''.join(str(lst)) for lst in resultnumset]
except:
#comment remains unchanged
pass
now = datetime.utcnow()
res = []
if userange:
st = bn.get_min(self.ndnumset[0])
et = bn.get_max(self.ndnumset[0])
st = num2date(float(st)).replace(tzinfo=None)
et = num2date(float(et)).replace(tzinfo=None)
for key in keystoflag:
res.apd([st,et,key,flagnumber,comment,sensorid,now])
else:
for idx,st in enumerate(self.ndnumset[0]):
for key in keystoflag:
st = num2date(float(st)).replace(tzinfo=None)
if uselist:
res.apd([st,st,key,flagnumber,commentnumset[idx],sensorid,now])
else:
res.apd([st,st,key,flagnumber,comment,sensorid,now])
return res
def flaglistmod(self, mode='select', flaglist=[], parameter='key', value=None, newvalue=None, starttime=None, endtime=None):
"""
DEFINITION:
Select/Replace/Delete information in flaglist
parameters are key, flagnumber, comment, startdate, enddate=None
mode remove_operation: if only starttime and endtime are provided then total data inbetween is removed,
if parameter and value are provided this data is removed, eventutotal
only between start and endtime
APPLICATION:
"""
num = 0
# convert start and end to correct format
if parameter == 'key':
num = 2
elif parameter == 'flagnumber':
num = 3
elif parameter == 'comment':
num = 4
elif parameter == 'sensorid':
num = 5
if mode in ['select','replace'] or (mode=='remove_operation' and value):
if starttime:
starttime = self._testtime(starttime)
flaglist = [elem for elem in flaglist if elem[1] > starttime]
if endtime:
endtime = self._testtime(endtime)
flaglist = [elem for elem in flaglist if elem[0] < endtime]
elif mode == 'remove_operation' and not value:
print ("Only deleting")
flaglist1, flaglist2 = [],[]
if starttime:
starttime = self._testtime(starttime)
flaglist1 = [elem for elem in flaglist if elem[1] < starttime]
if endtime:
endtime = self._testtime(endtime)
flaglist2 = [elem for elem in flaglist if elem[0] > endtime]
flaglist1.extend(flaglist2)
flaglist = flaglist1
if mode == 'select':
if num>0 and value:
if num == 4:
flaglist = [elem for elem in flaglist if elem[num].find(value) > 0]
elif num == 3:
flaglist = [elem for elem in flaglist if elem[num] == int(value)]
else:
flaglist = [elem for elem in flaglist if elem[num] == value]
elif mode == 'replace':
if num>0 and value:
for idx, elem in enumerate(flaglist):
if num == 4:
if elem[num].find(value) >= 0:
flaglist[idx][num] = newvalue
elif num == 3:
if elem[num] == int(value):
flaglist[idx][num] = int(newvalue)
else:
if elem[num] == value:
flaglist[idx][num] = newvalue
elif mode == 'remove_operation':
if num>0 and value:
if num == 4:
flaglist = [elem for elem in flaglist if elem[num].find(value) < 0]
elif num == 3:
flaglist = [elem for elem in flaglist if not elem[num] == int(value)]
else:
flaglist = [elem for elem in flaglist if not elem[num] == value]
return flaglist
def flaglistadd_concat(self, flaglist, sensorid, keys, flagnumber, comment, startdate, enddate=None):
"""
DEFINITION:
Add a specific ibnut to a flaglist
Flaglist elements look like
[st,et,key,flagnumber,comment,sensorid,now]
APPLICATION:
newflaglist = stream.flaglistadd_concat(oldflaglist,sensorid, keys, flagnumber, comment, startdate, enddate)
"""
# convert start and end to correct format
st = self._testtime(startdate)
if enddate:
et = self._testtime(enddate)
else:
et = st
now = datetime.utcnow()
if keys in ['total','All','ALL']:
keys = KEYLIST
for key in keys:
flagelem = [st,et,key,flagnumber,comment,sensorid,now]
exists = [elem for elem in flaglist if elem[:5] == flagelem[:5]]
if len(exists) == 0:
flaglist.apd(flagelem)
else:
print ("flaglistadd_concat: Flag already exists")
return flaglist
def flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate=0., debug=False):
"""
DEFINITION:
Add flags to specific times or time ranges (if enddate is provided).
PARAMETERS:
Variables:
- key: (str) Column to apply flag to, e.g. 'x'
- flag: (int) 0 ok, 1 remove, 2 force ok, 3 force remove,
4 merged from other instrument
- comment: (str) The reason for flag
- startdate: (datetime object) the date of the (first) datapoint to remove
Kwargs:
- enddate: (datetime object) the enddate of a time range to be flagged
- samplingrate: (float) in seconds, needs to be provided for effective nearby search
RETURNS:
- DataStream: Ibnut stream with flags and comments.
EXAMPLE:
>>> data = data.flag_stream('x',0,'Lawnmower',flag1,flag1_end)
APPLICATION:
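Illustrative usage (times are placeholders):
>>> flag1 = datetime(2021, 1, 1, 12, 0)
>>> flag1_end = datetime(2021, 1, 1, 12, 30)
>>> data = data.flag_stream('x', 3, 'lawnmower', flag1, flag1_end)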
"""
# TODO:
# make flag_stream accept key lists -> much faster for multiple column data
sr = samplingrate
if not key in KEYLIST:
logger.error("flag_stream: %s is not a valid key." % key)
return self
if not flag in [0,1,2,3,4]:
logger.error("flag_stream: %s is not a valid flag." % flag)
return self
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
elif not len(self) > 0:
return DataStream()
startdate = self._testtime(startdate)
if not enddate:
# Set enddate to startdate
# Hereby flag nearest might be used later
enddate = startdate
"""
start = date2num(startdate)
check_startdate, val = self.findtime(start)
if check_startdate == 0:
logger.info("flag_stream: No data at given date for flag. Finding nearest data point.")
if ndtype:
time = self.ndnumset[0]
else:
time = self._get_column('time')
#print start, len(time)
new_endtime, index = find_nearest(time, start)
if new_endtime > start:
startdate = num2date(start)
enddate = num2date(new_endtime)
else:
startdate = num2date(new_endtime)
enddate = num2date(start)
else:
enddate = startdate
"""
else:
enddate = self._testtime(enddate)
### ######## IF STARTDATE == ENDDATE
### MODIFIED TO STARTDATE - samplingrate/3, ENDDATE + samplingrate/3
### Taking 1/3 is arbitrary.
### This helps to apply flagging info to any_condition higher resolution record
### which does not contain the exact time stamp.
### You will likely exclude more data than necessary.
### Flag the high resolution data set to avoid that.
def rangeExtend(startdate,enddate,samplingrate,divisor=3):
if startdate == enddate:
startdate = startdate-timedelta(seconds=samplingrate/divisor)
enddate = enddate+timedelta(seconds=samplingrate/divisor)
start = date2num(startdate)
end = date2num(enddate)
return start,end
else:
start = date2num(startdate)
end = date2num(enddate)
return start,end
pos = FLAGKEYLIST.index(key)
if debug:
print("flag_stream: Flag",startdate, enddate)
start = date2num(startdate)
end = date2num(enddate)
get_mint = bn.get_min(self.ndnumset[0])
get_maxt = bn.get_max(self.ndnumset[0])
if start < get_mint and end < get_mint:
st = 0
ed = 0
elif start > get_maxt and end > get_maxt:
st = 0
ed = 0
else:
### Modified to use nearest value to be flagged if flagtimes
### overlap with streams timerange
### find_nearest is probably very slowly...
### Using startidx values to speed up the process at least for later data
# Get start and end indicies:
if debug:
ti1 = datetime.utcnow()
st, ls = self.findtime(startdate,mode='get_argget_max')
# st is the starttime, ls ? -- modification totalow to provide key list!!
if debug:
ti2 = datetime.utcnow()
print ("flag_stream: findtime duration", ti2-ti1)
#if debug:
# ti1 = datetime.utcnow()
# testls = nonzero(self.ndnumset[0]==startdate)
# ti2 = datetime.utcnow()
# print ("Findtime duration -alternative", ti2-ti1)
if st == 0:
#print("Flag_stream: slowly start",st)
if not sr == 0:
# Deterget_mine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
ls,st = find_nearest(self.ndnumset[0],start)
sti = st-2
if sti < 0:
sti = 0
ed, le = self.findtime(enddate,startidx=sti,mode='get_argget_max')
if ed == 0:
#print("Flag_stream: slowly end",ed)
if not sr == 0:
# Deterget_mine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
le, ed = find_nearest(self.ndnumset[0],end) ### TODO use startundex here as well
if ed == len(self.ndnumset[0]):
ed = ed-1
# Create a defaultflag
defaultflag = ['-' for el in FLAGKEYLIST]
if debug:
ti3 = datetime.utcnow()
print ("Full Findtime duration", ti3-ti1)
print("flagging", st, ed)
if ndtype:
numset = [[] for el in KEYLIST]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
# Check whether flag and comment columns already exist - if not create empty ones
if not len(self.ndnumset[flagind]) > 0:
numset[flagind] = [''] * len(self.ndnumset[0])
else:
numset[flagind] = list(self.ndnumset[flagind])
if not len(self.ndnumset[commentind]) > 0:
numset[commentind] = [''] * len(self.ndnumset[0])
else:
numset[commentind] = list(self.ndnumset[commentind])
# Now either modify existing or add_concat new flag
if st==0 and ed==0:
pass
else:
t3a = datetime.utcnow()
for i in range(st,ed+1):
#if self.ndnumset[flagind][i] == '' or self.ndnumset[flagind][i] == '-':
if numset[flagind][i] == '' or numset[flagind][i] == '-':
flagls = defaultflag
else:
flagls = list(numset[flagind][i])
# if the existing flag list is shorter because new columns were added to the ndnumset later
if len(flagls) < pos:
flagls.extend(['-' for j in range(pos+1-len(flagls))])
flagls[pos] = str(flag)
numset[flagind][i] = ''.join(flagls)
numset[commentind][i] = comment
self.ndnumset[flagind] = bn.numset(numset[flagind], dtype=bn.object)
self.ndnumset[commentind] = bn.numset(numset[commentind], dtype=bn.object)
# up to 0.3.98 the following code was used (~10 times slower)
# further significant speed up requires some structural changes:
# 1. use keylist here
#self.ndnumset[flagind] = bn.asnumset(numset[flagind]).convert_type(object)
#self.ndnumset[commentind] = bn.asnumset(numset[commentind]).convert_type(object)
else:
for elem in self:
if elem.time >= start and elem.time <= end:
fllist = list(elem.flag)
if not len(fllist) > 1:
fllist = defaultflag
fllist[pos] = str(flag)
elem.flag=''.join(fllist)
elem.comment = comment
if flag == 1 or flag == 3 and debug:
if enddate:
#print ("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat(),enddate.isoformat(),comment))
try:
logger.info("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),enddate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
else:
try:
logger.info("flag_stream: Flagged data at %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
return self
def simplebasevalue2stream(self,basevalue,**kwargs):
"""
DESCRIPTION:
simple basevalue correction using a basevalue list
PARAMETERS:
basevalue (list): [baseH,baseD,baseZ]
keys (list): default = 'x','y','z'
APPLICATION:
used by stream.baseline
"""
mode = kwargs.get('mode')
keys = ['x','y','z']  # default components as stated in the docstring above
# Changed that - 49 sec before, no less then 2 secs
if not len(self.ndnumset[0]) > 0:
print("simplebasevalue2stream: requires ndnumset")
return self
#1. calculate function value for each data time step
numset = [[] for key in KEYLIST]
numset[0] = self.ndnumset[0]
# get x numset for baseline
#indx = KEYLIST.index('x')
for key in KEYLIST:
ind = KEYLIST.index(key)
if key in keys: # new
#print keys.index(key)
ar = self.ndnumset[ind].convert_type(float)
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndnumset[ind]), key, self.ndnumset[ind]
numset[ind] = bn.arctan2(bn.asnumset(list(ar)),bn.asnumset(list(numsetx)))*180./bn.pi + basevalue[keys.index(key)]
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
numset[ind] = ar + basevalue[keys.index(key)]
if key == 'x': # remember this for correct y deterget_mination
numsetx = numset[ind]
else: # new
if len(self.ndnumset[ind]) > 0:
numset[ind] = self.ndnumset[ind].convert_type(object)
self.header['DataComponents'] = 'HDZ'
return DataStream(self,self.header,bn.asnumset(numset))
def func2stream(self,funclist,**kwargs):
"""
DESCRIPTION:
combine data stream and functions obtained by fitting and interpolation. Possible combination
modes are 'add_concat' (default), subtract 'sub', divide 'div' and 'multiply'. Furthermore, the
function values can replace the original values at the given timesteps of the stream
PARAMETERS:
funclist (list of functions): required - each function is an output of stream.fit or stream.interpol
#function (function): required - output of stream.fit or stream.interpol
keys (list): default = '<KEY>'
mode (string): one of 'add_concat','sub','div','multiply','values' - default = 'add_concat'
APPLICATION:
used by stream.baseline
"""
keys = kwargs.get('keys')
fkeys = kwargs.get('fkeys')
mode = kwargs.get('mode')
if not keys:
keys = ['<KEY>']
if not mode:
mode = 'add_concat'
if fkeys and not len(fkeys) == len(keys):
fkeys=None
logger.warning("func2stream: provided fkeys do not match keys")
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist # TODO: cycle through list
totalnumset = [[] for key in KEYLIST]
posstr = KEYLIST.index('str1')
testx = []
for function in funct:
#print ("Testing", function)
if not function:
return self
# Changed that - 49 sec before, no less then 2 secs
if not len(self.ndnumset[0]) > 0:
print("func2stream: requires ndnumset - trying old LineStruct functions")
if mode == 'add_concat':
return self.func_add_concat(function, keys=keys)
elif mode == 'sub':
return self.func_subtract(function, keys=keys)
else:
return self
#1. calculate function value for each data time step
numset = [[] for key in KEYLIST]
numset[0] = self.ndnumset[0]
dis_done = False
# get x numset for baseline
#indx = KEYLIST.index('x')
#numsetx = self.ndnumset[indx].convert_type(float)
functimenumset = (self.ndnumset[0].convert_type(float)-function[1])/(function[2]-function[1])
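# map the time column onto the normalized [0, 1] interval used when the function was fitted;
# function[1] and function[2] hold the original start and end times of the fit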
for key in KEYLIST:
validkey = False
ind = KEYLIST.index(key)
if key in keys: # new
#print ("DEALING: ", key)
keyind = keys.index(key)
if fkeys:
fkey = fkeys[keyind]
else:
fkey = key
ar = bn.asnumset(self.ndnumset[ind]).convert_type(float)
try:
test = function[0]['f'+fkey](functimenumset)
validkey = True
except:
pass
if mode == 'add_concat' and validkey:
print ("here", ar, function[0]['f'+fkey](functimenumset))
numset[ind] = ar + function[0]['f'+fkey](functimenumset)
elif mode == 'add_concatbaseline' and validkey:
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndnumset[ind]), key, self.ndnumset[ind]
numset[ind] = bn.arctan2(bn.asnumset(list(ar)),bn.asnumset(list(numsetx)))*180./bn.pi + function[0]['f'+fkey](functimenumset)
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
#print("func2stream", function, function[0], function[0]['f'+key],functimenumset)
numset[ind] = ar + function[0]['f'+fkey](functimenumset)
if len(numset[posstr]) == 0:
#print ("Assigned values to str1: function {}".format(function[1]))
numset[posstr] = ['c']*len(ar)
if len(testx) > 0 and not dis_done:
# identify change from number to nan
# add_concat discontinuity marker there
#print ("Here", testx)
prevel = bn.nan
for idx, el in enumerate(testx):
if not bn.ifnan(prevel) and bn.ifnan(el):
numset[posstr][idx] = 'd'
#print ("Modified str1 at {}".format(idx))
break
prevel = el
dis_done = True
if key == 'x': # remember this for correct y deterget_mination
numsetx = numset[ind]
testx = function[0]['f'+fkey](functimenumset)
if key == 'dx': # use this column to test if delta values are already provided
testx = function[0]['f'+fkey](functimenumset)
elif mode in ['sub','subtract'] and validkey:
numset[ind] = ar - function[0]['f'+fkey](functimenumset)
elif mode == 'values' and validkey:
numset[ind] = function[0]['f'+fkey](functimenumset)
elif mode == 'div' and validkey:
numset[ind] = ar / function[0]['f'+fkey](functimenumset)
elif mode == 'multiply' and validkey:
numset[ind] = ar * function[0]['f'+fkey](functimenumset)
elif validkey:
print("func2stream: mode not recognized")
else: # new
if len(self.ndnumset[ind]) > 0:
numset[ind] = bn.asnumset(self.ndnumset[ind]).convert_type(object)
for idx, col in enumerate(numset):
if len(totalnumset[idx]) > 0 and not idx == 0:
totalcol = totalnumset[idx]
for j,el in enumerate(col):
if idx < len(NUMKEYLIST)+1 and not bn.ifnan(el) and bn.ifnan(totalcol[j]):
totalnumset[idx][j] = numset[idx][j]
if idx > len(NUMKEYLIST) and not el == 'c' and totalcol[j] == 'c':
totalnumset[idx][j] = 'd'
else:
totalnumset[idx] = numset[idx]
return DataStream(self,self.header,bn.asnumset(totalnumset,dtype=object))
def func_add_concat(self,funclist,**kwargs):
"""
Add a function to the selected values of the data stream -> e.g. get baseline
Optional:
keys (default = 'x','y','z')
"""
keys = kwargs.get('keys')
mode = kwargs.get('mode')
if not keys:
keys = ['<KEY>']
if not mode:
mode = 'add_concat'
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct ctotal of old version only accepts single function
# Changed that - 49 sec before, no less then 2 secs
if len(self.ndnumset[0]) > 0:
#1. calculate function value for each data time step
numset = [[] for key in KEYLIST]
numset[0] = self.ndnumset[0]
functimenumset = (self.ndnumset[0].convert_type(float)-function[1])/(function[2]-function[1])
#print functimenumset
for key in keys:
ind = KEYLIST.index(key)
if mode == 'add_concat':
numset[ind] = self.ndnumset[ind] + function[0]['f'+key](functimenumset)
elif mode == 'sub':
numset[ind] = self.ndnumset[ind] - function[0]['f'+key](functimenumset)
elif mode == 'values':
numset[ind] = function[0]['f'+key](functimenumset)
elif mode == 'div':
numset[ind] = self.ndnumset[ind] / function[0]['f'+key](functimenumset)
elif mode == 'multiply':
numset[ind] = self.ndnumset[ind] * function[0]['f'+key](functimenumset)
else:
print("func2stream: mode not recognized")
return DataStream(self,self.header,bn.asnumset(numset,dtype=object))
for elem in self:
# check whether time step is in function range
if function[1] <= elem.time <= function[2]:
functime = (elem.time-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
fkey = 'f'+key
exec('keyval = elem.'+key)
if fkey in function[0] and not ifnan(keyval):
try:
newval = keyval + function[0][fkey](functime)
except:
newval = float('nan')
exec('elem.'+key+' = newval')
else:
pass
else:
pass
return self
def func_subtract(self,funclist,**kwargs):
"""
Subtract a function from the selected values of the data stream -> e.g. obtain Residuals
Optional:
keys (default = '<KEY>')
:type order int
:param order : 0 -> stream - function; 1 -> function - stream
"""
keys = kwargs.get('keys')
order = kwargs.get('order')
st = DataStream()
st = self.copy()
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct ctotal of old version only accepts single function
"""
for el in self:
li = LineStruct()
li.time = el.time
li.x = el.x
li.y = el.y
li.z = el.z
st.add_concat(li)
"""
if not order:
order = 0
if not keys:
keys = ['<KEY>']
for elem in st:
# check whether time step is in function range
if function[1] <= elem.time <= function[2]:
functime = (elem.time-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
fkey = 'f'+key
exec('keyval = elem.'+key)
if fkey in function[0] and not ifnan(keyval):
try:
if order == 0:
newval = keyval - function[0][fkey](functime)
else:
newval = function[0][fkey](functime) - keyval
except:
newval = float('nan')
exec('elem.'+key+' = newval')
else:
pass
else:
pass
return st
def func2header(self,funclist,debug=False):
"""
DESCRIPTION
Add a list of functions into the data header
"""
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
self.header['DataFunctionObject'] = funct
return self
def GetKeyName(self,key):
"""
DESCRIPTION
get the content name of a specific key
will scan header information until successful:
(1) col-"key" names
(2) ColumnContent header info
(3) SensorElements header info
if no Name for the key is found, then the key itself is returned
APPLICATION:
element = datastream.GetKeyName('var1')
"""
if not key in KEYLIST:
print ("key not in KEYLIST - aborting")
return ''
element = ''
# One
try:
element = self.header.get("col-{}".format(key))
if not element == '':
return element
except:
pass
# Two
try:
element = self.header.get('ColumnContents','').sep_split(',')[KEYLIST.index(key)]
if not element == '':
return element
except:
pass
# Three
try:
idx = self.header.get('SensorKeys','').sep_split(',').index(key)
element = self.header.get('SensorElements','').sep_split(',')[idx]
if not element == '':
return element
except:
pass
return key
def GetKeyUnit(self,key):
"""
DESCRIPTION
get the content name of a specific key
will scan header information until successful:
(1) unit-col-"key" names
(2) ColumnUnit header info
if no unit for the key is found, then an empty string is returned
APPLICATION:
unit = datastream.GetKeyUnit('var1')
"""
if not key in KEYLIST:
print ("key not in KEYLIST - aborting")
return ''
unit = ''
# One
try:
unit = self.header.get("unit-col-{}".format(key))
if not unit == '':
return unit
except:
pass
# Two
try:
unit = self.header.get('ColumnUnits','').sep_split(',')[KEYLIST.index(key)]
if not unit == '':
return unit
except:
pass
return unit
def get_gaps(self, **kwargs):
"""
DEFINITION:
Takes the doget_minant sample frequency and fills nan into non-existing time steps:
This function provides the basis for discontinuous plots and gap analysis and proper filtering.
PARAMETERS:
Variables:
---
Kwargs:
- accuracy: (float) time relative to a day - default 1 sec
- gapvariable: (string) - referring to stream column - default='var5' - This column
is overwritten with 0 (data) and 1 (no data).
- key: (string) - referring to a data column e.g. key='x'. If given then all NaN values with existing time steps are also marked by '1' in the gapvariable line for this key
RETURNS:
- stream: (Datastream)
EXAMPLE:
>>> stream_with_gaps_masked_fill = stream_with_gaps.get_gaps(key='f')
APPLICATION:
used by nfilter() for correct filtering
CHANGES:
Last updated and tested with nfilter function by leon 2014-07-22
"""
accuracy = kwargs.get('accuracy')
key = kwargs.get('key')
gapvariable = kwargs.get('gapvariable')
debug = kwargs.get('debug')
if key in KEYLIST:
gapvariable = True
if not gapvariable:
gapvariable = 'var5'
if not self.length()[0] > 1:
print ("get_gaps: Stream does not contain data - aborting")
return self
# Better use get_sampling period as samplingrate is rounded
#spr = self.get_sampling_period()
#newsps = newsp*3600.0*24.0
newsps = self.samplingrate()
newsp = newsps/3600.0/24.0
if not accuracy:
accuracy = 0.9/(3600.0*24.0) # one second relative to day
accuracy = 0.05*newsp # 5 percent of samplingrate
if newsps < 0.9 and not accuracy:
accuracy = (newsps-(newsps*0.1))/(3600.0*24.0)
logger.info('--- Starting filling gaps with NANs at %s ' % (str(datetime.now())))
stream = self.copy()
prevtime = 0
ndtype = False
if len(stream.ndnumset[0]) > 0:
get_maxtime = stream.ndnumset[0][-1]
get_mintime = stream.ndnumset[0][0]
length = len(stream.ndnumset[0])
sourcetime = stream.ndnumset[0]
ndtype = True
else:
get_mintime = self[0].time
get_maxtime = self[-1].time
if debug:
print("Time range:", get_mintime, get_maxtime)
print("Length, samp_per and accuracy:", self.length()[0], newsps, accuracy)
shift = 0
if ndtype:
# Get time difference and expected count
timedifference = get_maxtime - get_mintime
expN = int(round(timedifference/newsp))+1
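# expected number of samples if the record were continuous at the dominant sampling period newsp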
if debug:
print("Expected length vs actual length:", expN, length)
if expN == len(sourcetime):
# Found the expected amount of time steps - no gaps
logger.info("get_gaps: No gaps found - Returning")
return stream
else:
# correct way (will be used by default) - does not use any_condition accuracy value
#projtime = bn.linspace(get_mintime, get_maxtime, num=expN, endpoint=True)
#print("proj:", projtime, len(projtime))
# find values or projtime, which are not in sourcetime
#dif = setdifference1d(projtime,sourcetime, astotal_counte_uniq=True)
#print (dif, len(dif))
#print (len(dif),len(sourcetime),len(projtime))
difference = sourcetime[1:] - sourcetime[:-1]
num_fills = bn.round(difference / newsp) - 1
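# number of missing samples per step: a gap spanning n sampling periods needs n-1 inserted time steps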
getdifferenceids = bn.filter_condition(difference > newsp+accuracy)[0]
logger.info("get_gaps: Found gaps - Filling nans to them")
if debug:
print ("Here", difference, num_fills, newsp, getdifferenceids)
missingt = []
# Get critical differenceerences and number of missing steps
for i in getdifferenceids:
#print (i, sourcetime[i-1], sourcetime[i], sourcetime[i+1])
nf = num_fills[i]
# if nf is larger than zero then append the missing time steps to the missingt list
if nf > 0:
for n in range(int(nf)): # add_concat n+1 * samplingrate for each missing value
missingt.apd(sourcetime[i]+(n+1)*newsp)
print ("Filling {} gaps".format(len(missingt)))
# Cycle through stream and apd nans to each column for missing time steps
nans = [bn.nan] * len(missingt)
empts = [''] * len(missingt)
gaps = [0.0] * len(missingt)
for idx,elem in enumerate(stream.ndnumset):
if idx == 0:
# apd missingt list to numset element
elem = list(elem)
lenelem = len(elem)
elem.extend(missingt)
stream.ndnumset[idx] = bn.asnumset(elem).convert_type(object)
elif len(elem) > 0:
# apd nans list to numset element
elem = list(elem)
if KEYLIST[idx] in NUMKEYLIST or KEYLIST[idx] == 'sectime':
elem.extend(nans)
else:
elem.extend(empts)
stream.ndnumset[idx] = bn.asnumset(elem).convert_type(object)
elif KEYLIST[idx] == gapvariable:
# apd nans list to numset element
elem = [1.0]*lenelem
elem.extend(gaps)
stream.ndnumset[idx] = bn.asnumset(elem).convert_type(object)
return stream.sorting()
else:
stream = DataStream()
for elem in self:
if absolute((prevtime+newsp) - elem.time) > accuracy and not prevtime == 0:
currtime = num2date(prevtime)+timedelta(seconds=newsps)
while currtime <= num2date(elem.time):
newline = LineStruct()
exec('newline.'+gapvariable+' = 1.0')
newline.time = date2num(currtime)
stream.add_concat(newline)
currtime += timedelta(seconds=newsps)
else:
exec('elem.'+gapvariable+' = 0.0')
if key in KEYLIST:
if ifnan(eval('elem.'+key)):
exec('elem.'+gapvariable+' = 1.0')
stream.add_concat(elem)
prevtime = elem.time
logger.info('--- Filling gaps finished at %s ' % (str(datetime.now())))
if debug:
print("Ending:", stream[0].time, stream[-1].time)
return stream.sorting()
def get_rotationangle(self, xcompensation=0,keys=['x','y','z'],**kwargs):
"""
DESCRIPTION:
"Estimating" the rotation angle towards a magnetic coordinate system
astotal_counting z to be vertical down. Please note: You need to provide a
complete horizontal vector including either the x compensation field
or if not available an annual estimate of the vector. This method can be used
to deterget_mine reorientation characteristics in order to accurately apply
HDZ optimized basevalue calculations.
RETURNS:
rotangle (float) The estimated rotation angle in degree
"""
annualaverages = kwargs.get('annualaverages')
#1. get vector from data
# x = y*tan(dec)
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('get_rotation: provided keylist need to have three components.')
return self
logger.info('get_rotation: Deterget_mining rotation angle towards a magnetic coordinate system astotal_counting z to be vertical down.')
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndnumset[0]) > 0:
if len(self.ndnumset[ind1]) > 0 and len(self.ndnumset[ind2]) > 0 and len(self.ndnumset[ind3]) > 0:
# get average disregarding nans
xl = [el for el in self.ndnumset[ind1] if not bn.ifnan(el)]
yl = [el for el in self.ndnumset[ind2] if not bn.ifnan(el)]
if annualaverages:
averagex = annualaverages[0]
else:
averagex = bn.average(xl)+xcompensation
averagey = bn.average(yl)
# get rotation angle so that averagey == 0
#print ("Rotation",averagex, averagey)
#zeroy = averagex*bn.sin(ra)+averagey*bn.cos(ra)
#-averagey/averagex = bn.tan(ra)
rotangle = bn.arctan2(-averagey,averagex) * (180.) / bn.pi
logger.info('getrotation: Rotation angle deterget_mined: {} deg'.format(rotangle))
return rotangle
def get_sampling_period(self):
"""
returns the doget_minant sampling frequency in unit ! days !
for time savings, this function only tests the first 1000 elements
"""
# For proper application - duplicates are removed
self = self.removeduplicates()
if len(self.ndnumset[0]) > 0:
timecol = self.ndnumset[0].convert_type(float)
else:
timecol= self._get_column('time')
# New way:
if len(timecol) > 1:
differences = bn.asnumset(timecol[1:]-timecol[:-1])
differences = differences[~bn.ifnan(differences)]
me = bn.median(differences)
st = bn.standard_op(differences)
differences = [el for el in differences if el <= me+2*st and el >= me-2*st]
return bn.median(differences)
else:
return 0.0
"""
timedifferencelist = [[0,0]]
timedifference = 0
if len(timecol) <= 1000:
testrange = len(timecol)
else:
testrange = 1000
print "Get_sampling_rate", bn.asnumset(timecol[1:]-timecol[:-1])
print "Get_sampling_rate", bn.median(bn.asnumset(timecol[1:]-timecol[:-1]))*3600.*24.
for idx, val in enumerate(timecol[:testrange]):
if idx > 1 and not ifnan(val):
timedifference = bn.round((val-timeprev),7)
found = 0
for tel in timedifferencelist:
if tel[1] == timedifference:
tel[0] = tel[0]+1
found = 1
if found == 0:
timedifferencelist.apd([1,timedifference])
timeprev = val
#print self
if not len(timedifferencelist) == 0:
timedifferencelist.sort(key=lambda x: int(x[0]))
# get the most often found timedifference
domtd = timedifferencelist[-1][1]
else:
logger.error("get_sampling_period: unkown problem - returning 0")
domtd = 0
if not domtd == 0:
return domtd
else:
try:
return timedifferencelist[-2][1]
except:
logger.error("get_sampling_period: could not identify doget_minant sampling rate")
return 0
"""
def samplingrate(self, **kwargs):
"""
DEFINITION:
returns a rounded value of the sampling rate
in seconds
and updates the header information
"""
# XXX include that in the stream reading process....
digits = kwargs.get('digits')
notrounded = kwargs.get('notrounded')
if not digits:
digits = 1
if not self.length()[0] > 1:
return 0.0
sr = self.get_sampling_period()*24*3600
unit = ' sec'
val = sr
# Create a suitable rounding function:
# Use simple rounds if sr > 60 secs
# Check accuracy for sr < 10 secs (three digits:
# if absolute(sr-round(sr,0)) * 1000 e.g. (1.002 -> 2, 0.998 -> 2)
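# e.g. sr = 1.002 -> floored value 1 with remaining digits 2 (< 5), rounded to 1.0;
#      sr = 1.02  -> remaining digits 20 (>= 5), kept as 1.02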
if sr < 0.05:
for i in range(0,5):
multi = 10**i
srfloor = bn.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now deterget_mine significance taking into account three more digits
digs = bn.floor(bn.absolute(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = bn.round(srfloor/multi,1)
else:
val = bn.round(sr,5)
break
elif sr < 59:
for i in range(0,3):
multi = 10**i
srfloor = bn.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now deterget_mine significance taking into account three more digits
digs = bn.floor(bn.absolute(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = bn.round(srfloor/multi,1)
else:
val = bn.round(sr,3)
break
else:
val = bn.round(sr,1)
"""
if bn.round(sr*10.,0) == 0:
val = bn.round(sr,2)
#unit = ' Hz'
elif bn.round(sr,0) == 0:
if 0.09 < sr < 0.11:
val = bn.round(sr,digits)
else:
val = bn.round(sr,2)
#unit = ' Hz'
else:
val = bn.round(sr,0)
"""
if notrounded:
val = sr
self.header['DataSamplingRate'] = str(val) + unit
return val
def integrate(self, **kwargs):
"""
DESCRIPTION:
Method to integrate selected columns respect to time.
-- Using scipy.integrate.cumtrapz
VARIABLES:
optional:
keys: (list) default = ['x','y','z'] (as used in the code below); provide a limited key list
"""
logger.info('--- Integrating started at %s ' % str(datetime.now()))
keys = kwargs.get('keys')
if not keys:
keys = ['x','y','z']
numset = [[] for key in KEYLIST]
ndtype = False
if len(self.ndnumset[0])>0:
ndtype = True
t = self.ndnumset[0]
numset[0] = t
else:
t = self._get_column('time')
for key in keys:
if ndtype:
ind = KEYLIST.index(key)
val = self.ndnumset[ind]
numset[ind] = bn.asnumset(val)
else:
val = self._get_column(key)
dval = sp.integrate.cumtrapz(bn.asnumset(val),t)
dval = bn.stick(dval, 0, 0) # Prepend 0 to maintain original length
if ndtype:
ind = KEYLIST.index('d'+key)
numset[ind] = bn.asnumset(dval)
else:
self._put_column(dval, 'd'+key)
self.ndnumset = bn.asnumset(numset)
logger.info('--- integration finished at %s ' % str(datetime.now()))
return self
def interpol(self, keys, **kwargs):
"""
DEFINITION:
Uses scipy's interpolate.interp1d to interpolate streams.
PARAMETERS:
Variables:
- keys: (list) List of keys to interpolate.
Kwargs:
- kind: (str) type of interpolation. Options:
linear = linear - Default
slinear = spline (first order)
quadratic = spline (second order)
cubic = spline (third order)
nearest = ?
zero = ?
(TODO: add_concat these?)
- timerange: (timedelta object) default=timedelta(hours=1).
- fitdegree: (float) default=4.
- knotstep: (float < 0.5) deterget_mines the amount of knots:
amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
RETURNS:
- func: (list) Contains the following:
list[0]: (dict) {'f+key': interpolate function}
list[1]: (float) date2num value of get_minimum timestamp
list[2]: (float) date2num value of get_maximum timestamp
EXAMPLE:
>>> int_data = pos_data.interpol(['f'])
APPLICATION:
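Illustrative usage (kind and keys are examples):
>>> func = stream.interpol(['x','y','z'], kind='cubic')
>>> filledstream = stream.func2stream(func, keys=['x','y','z'], mode='values')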
"""
kind = kwargs.get('kind')
if not kind:
kind = 'linear'
if kind not in ['linear','slinear','quadratic','cubic','nearest','zero']:
logger.warning("interpol: Interpolation kind %s not valid. Using linear interpolation instead." % kind)
kind = 'linear'
ndtype = False
if len(self.ndnumset[0]) > 0:
t = self.ndnumset[0]
ndtype = True
else:
t = self._get_column('time')
nt,sv,ev = self._normlizattionalize(t)
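# nt is the time column scaled to [0,1]; sv and ev keep the original minimum and maximum timestamps
# so that the returned functions can later be evaluated on absolute times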
sp = self.get_sampling_period()
functionkeylist = {}
logger.info("interpol: Interpolating stream with %s interpolation." % kind)
for key in keys:
if not key in NUMKEYLIST:
logger.error("interpol: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
val = self.ndnumset[ind].convert_type(float)
else:
val = self._get_column(key)
# interplolate NaN values
nans, xxx= nan_helper(val)
try: # Try to interpolate nan values
val[nans]= bn.interp(xxx(nans), xxx(~nans), val[~nans])
except:
#val[nans]=int(nan)
pass
if len(val)>1:
exec('f'+key+' = interpolate.interp1d(nt, val, kind)')
exec('functionkeylist["f'+key+'"] = f'+key)
else:
logger.warning("interpol: interpolation of zero length data set - wont work.")
pass
logger.info("interpol: Interpolation complete.")
func = [functionkeylist, sv, ev]
return func
def interpolate_nans(self, keys):
""""
DEFINITION:
Provides a simple linear nan interpolator that returns the interpolated
data in the stream. Uses a method that is already present elsewhere, e.g.
in filter, for easy and quick access.
PARAMETERS:
- keys: List of keys to interpolate.
RETURNS:
- stream: Original stream with nans replaced by linear interpolation.
"""
for key in keys:
if key not in NUMKEYLIST:
logger.error("interpolate_nans: {} is an inversealid key! Cannot interpolate.".format(key))
y = self._get_column(key)
nans, x = nan_helper(y)
y[nans] = bn.interp(x(nans), x(~nans), y[~nans])
self._put_column(y, key)
logger.info("interpolate_nans: Replaced nans in {} with linearly interpolated values.".format(key))
return self
def k_extend(self, **kwargs):
"""
DESCRIPTION:
Extending the k_scale from 9 to 28 values as used for the GFZ kp value
"""
k9_level = kwargs.get('k9_level')
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
newlst = []
klst = [0.,0.33,0.66,1.,1.33,1.66,2.,2.33,2.66,3.,3.33,3.66,4.,4.33,4.66,5.,5.33,5.66,6.,6.33,6.66,7.,7.33,7.66,8.,8.33,8.66,9.]
for idx,elem in enumerate(k_scale):
if idx > 0:
difference = elem - k_scale[idx-1]
newlst.apd(elem-2*difference/3)
newlst.apd(elem-difference/3)
newlst.apd(elem)
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
ar = []
for elem in self.ndnumset[indvar2]:
for count,val in enumerate(newlst):
if elem > val:
k = klst[count]
ar.apd(k)
self.ndnumset[indvar1] = bn.asnumset(ar)
return self
def k_fmi(self, **kwargs):
"""
DESCRIPTION:
Calculating k values following the FMI approach. The method uses three major steps:
Firstly, the record is eventually filtered to minute data, outliers are removed
(using default options) and gaps are interpolated. Ideally, these steps have been
conducted before, which allows for complete control of them.
Secondly, the last 27 hours are investigated. Starting from the last record, the last
three hour segment is taken and the FMI approach is applied. Finally, the provided
stream is analyzed from the beginning. Definite values are thus produced for the
previous day after 3:00 am (depending on n - see below).
The FMI method:
The provided data stream is checked and converted to xyz data. Investigated are the
horizontal components. In a first run k values are calculated by simply determining
the max/min difference of the minute variation data within the three hour segments.
This is done for both horizontal components and the maximum difference is selected.
Using the transformation table related to the Niemegk scale the k values are calculated.
Based on these k values, a first estimate of the quiet daily variation (Sr) is obtained.
Hourly averages with extended time ranges (30 min + m + n) are obtained for each x.5 hour.
m refers to 120 minutes (0-3 a.m., 21-24 p.m.), 60 minutes (3-6, 18-21) or 0 minutes.
n is determined by k**3.3.
xyz within the code always refers to the coordinate system of the sensor and not to any geomagnetic reference.
By default it is assumed that the provided stream comes from a hdz oriented instrument.
For xyz (or any other) orientation use the option checky=True to investigate both horizontal components.
If the stream contains absolute data, the option hcomp = True transforms the stream to hdz.
The following steps are performed:
1. Asserts: Signal covers at least 24 hours, sampling rate minute or second
2. Produce filtered minute signal, check for gaps, eventually interpolate (done by filter/sm algorithm) - needs some improvements
3. from the last value contained get 3 hour segments and calculate max, min and max-min
kwargs support the following keywords:
- k9_level (float) the value for which k9 is defined, all other values are linearly approximated
- magnetic latitude (float) another way to define the k scale
- timerange (timedelta object) default=timedelta(hours=1)
- fitdegree (float) default=5
- knotstep (float < 0.5) determines the amount of knots: amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
- flag
PARAMETER:
k9_level (int) defines the observatory's K9 level. If not provided, the header
information is scanned first for a 'StationK9' entry. If that is not
successful, a K9 of 500 nT is assumed.
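EXAMPLE:
A usage sketch (assuming a variometer stream covering well over 24 hours):
>>> kvals = datastream.k_fmi(k9_level=500)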
"""
plot = kwargs.get('plot')
debug = kwargs.get('debug')
hcomp = kwargs.get('hcomp')
fitdegree = kwargs.get('fitdegree')
fitfunc=kwargs.get('fitfunc')
magnetic_latitude = kwargs.get('magnetic_latitude')
k9_level = kwargs.get('k9_level')
checky = kwargs.get('checky') # used for xyz data if True then the y component is checked as well
if not fitfunc:
fitfunc = 'harmonic'
if not fitdegree:
fitdegree = 5
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
# Some basics:
startinghours = [0,3,6,9,12,15,18,21]
mlist = [120,60,0,0,0,0,60,120]
#ngkscale = [0,5,10,20,40,70,120,200,330,500]
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
# calculate local scale from magnetic latitude (inclination):
# important: how to do that - what is the latitudinal relationship, how to transfer the scale,
# it is frequently mentioned to be quasi-log but it is not a simple Log scale
# func can be fitted reasonably well by
# func[a_] := Exp[0.8308663199145958 + 0.7894060396483681 k - 0.021250627459823503 k^2]
kstream = DataStream()
logger.info('--- Starting k value calculation: %s ' % (str(datetime.now())))
# Non-destructive - using a copy of the supplied stream
stream = self.copy()
# ############################################
# ## Step 1 ##############
# ## ------------------------ ##############
# ## preparing data: ##############
# ## - check sampling/length ##############
# ## - check type (xyz etc) ##############
# ## - check removing outliers ##############
# ## - eventutotaly filter ##############
# ## - interpolate/fill gaps ##############
# ############################################
# removing outliers
if debug:
print("Removing outliers")
stream = stream.flag_outlier(keys=['x','y','z'],threshold=6.) # Weak conditions
stream = stream.remove_flagged()
sr = stream.samplingrate()
if debug:
print("Sampling rate", sr)
if sr > 65:
print("Algorythm requires get_minute or higher resolution - aborting")
return DataStream()
if sr <= 0.9:
print("Data appears to be below 1 second resolution - filtering to seconds first")
stream = stream.nfilter(filter_width=timedelta(seconds=1))
sr = stream.samplingrate()
if 0.9 < sr < 55:
print("Data appears to be below 1 get_minute resolution - filtering to get_minutes")
stream = stream.nfilter(filter_width=timedelta(get_minutes=1))
else:
pass
# get_gaps - put nans to missing data
# then replace nans with interpolated values
#nans, x= nan_helper(v)
# v[nans]= interp(x(nans), x(~nans), v[~nans])
ndtype = True
if len(stream.ndnumset[0]) > 0:
ndtype = True
timedifference = bn.get_max(stream.ndnumset[0]) - bn.get_min(stream.ndnumset[0])
indtyp = KEYLIST.index('typ')
try:
gettyp = stream.ndnumset[indtyp][0]
except:
gettyp = 'xyzf'
print("ndtype - Timeseries ending at:", num2date(bn.get_max(stream.ndnumset[0])))
else:
timedifference = stream[-1].time - stream[0].time
gettyp = stream[0].typ
print("LineStruct - Timeseries ending at:", num2date(stream[-1].time))
print("Coverage in days:", timedifference)
if timedifference < 1.1: # 1 corresponds to 24 hours
print("not enough time covered - aborting")
return
if debug:
print("Typ:", gettyp)
# Transform the coordinate system to XYZ, assuming a hdz orientation.
fmistream = stream
if gettyp == 'idff':
fmistream = stream._convertstream('idf2xyz',keep_header=True)
elif gettyp == 'hdzf':
fmistream = stream._convertstream('hdz2xyz',keep_header=True)
elif not gettyp == 'xyzf':
print("Unkown type of data - please provide xyzf, idff, hdzf -aborting")
return
# By default use H for deterget_mination
if debug:
print("converting data to hdz - only analyze h")
print("This is applicable in case of baselinecorrected data")
# TODO Important currently we are only using x (or x and y)
if hcomp:
print("Please note: H comp requires that columns xyz contain baseline corrected values")
fmistream = fmistream._convertstream('xyz2hdz',keep_header=True)
elif 'DataAbsFunctionObject' in fmistream.header:
print("Found Baseline function")
pass # to a bc correction and
checky = True
else:
# If variation data use get_maximum from x and y
checky = True
# ############################################
# ## Step 2 ##############
# ## ------------------------ ##############
# ## some functions ##############
# ############################################
def klist2stream(klist, kvalstream=DataStream() ,ndtype=True):
"""
Internal method to convert a k value list to a stream
"""
#emptystream = DataStream()
if len(kvalstream.ndnumset[0]) > 0:
kexists = True
#ti = list(li.ndnumset[0])
#print "Previous k", li.ndnumset
elif len(kvalstream) > 0:
kexists = True
#li = [elem for elem in kvalstream]
#ti = [elem.time for elem in kvalstream]
else:
kexists = False
numset = [[] for key in KEYLIST]
#li = DataStream()
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
indvar3 = KEYLIST.index('var3')
if ndtype:
#numset = [[] for key in KEYLIST]
for kline in klist:
time = kline[0]
if kexists:
try:
ind = list(kvalstream.ndnumset[0]).index(time)
#print "Found time at index", ind
#if kvalstream.ndnumset[indvar3][ind] < quality lower
kvalstream = kvalstream._remove_operation(ind)
except:
pass
kvalstream.ndnumset[0] = bn.apd(kvalstream.ndnumset[0],kline[0])
kvalstream.ndnumset[indvar1] = bn.apd(kvalstream.ndnumset[indvar1],kline[1])
kvalstream.ndnumset[indvar2] = bn.apd(kvalstream.ndnumset[indvar2],kline[2])
kvalstream.ndnumset[indvar3] = bn.apd(kvalstream.ndnumset[indvar3],kline[3])
else:
# put data to kvalstream
numset[0].apd(kline[0])
numset[indvar1].apd(kline[1])
numset[indvar2].apd(kline[2])
numset[indvar3].apd(kline[3]) # Quality parameter - containg time coverage
# High quality replaces low quality
if not kexists:
numset[0] = bn.asnumset(numset[0])
numset[indvar1] = bn.asnumset(numset[indvar1])
numset[indvar2] = bn.asnumset(numset[indvar2])
kvalstream.ndnumset = bn.asnumset(numset)
return kvalstream
def get_maxget_mink(datastream, cdlist, index, k_scale, ndtype=True, **kwargs):
# function returns 3 hour k values for a 24 hour get_minute time series
# The following function is used several times on different !!!!! 24h !!!!!!! timeseries
# (with and without removal of daily-quiet signals)
checky = kwargs.get('checky')
xget_maxval = 0
xget_minverseal = 0
yget_maxval = 0
yget_minverseal = 0
deltaday = 0
klist = []
for j in range(0,8):
if debug:
print("Loop Test", j, index, num2date(cdlist[index])-timedelta(days=deltaday))
#t7 = datetime.utcnow()
#threehours = datastream.extract("time", date2num(num2date(cdlist[index])-timedelta(days=deltaday)), "<")
et = date2num(num2date(cdlist[index])-timedelta(days=deltaday))
index = index - 1
if index < 0:
index = 7
deltaday += 1
if debug:
print("Start", num2date(cdlist[index])-timedelta(days=deltaday))
#threehours = threehours.extract("time", date2num(num2date(cdlist[index])-timedelta(days=deltaday)), ">=")
st = date2num(num2date(cdlist[index])-timedelta(days=deltaday))
ar = datastream._select_timerange(starttime=st, endtime=et)
threehours = DataStream([LineStruct()],{},ar)
#print("ET",st,et)
#t8 = datetime.utcnow()
#print("Extracting time needed:", t8-t7)
if ndtype:
len3hours = len(threehours.ndnumset[0])
else:
len3hours = len(threehours)
if debug:
print("Length of three hour segment", len3hours)
if len3hours > 0:
if ndtype:
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
colx = threehours.ndnumset[indx]
else:
colx = threehours._get_column('x')
colx = [elem for elem in colx if not ifnan(elem)]
if len(colx) > 0:
xget_maxval = get_max(colx)
xget_minverseal = get_min(colx)
else:
yget_maxval = 0.0
yget_minverseal = 0.0
if checky:
if ndtype:
coly = threehours.ndnumset[indy]
else:
coly = threehours._get_column('y')
coly = [elem for elem in coly if not ifnan(elem)]
yget_maxval = get_max(coly)
yget_minverseal = get_min(coly)
else:
yget_maxval = 0.0
yget_minverseal = 0.0
get_maxget_mindifference = get_max([xget_maxval-xget_minverseal, yget_maxval-yget_minverseal])
k = bn.nan
for count,val in enumerate(k_scale):
if get_maxget_mindifference > val:
k = count
if bn.ifnan(k):
get_maxget_mindifference = bn.nan
if debug:
print("Extrema", k, get_maxget_mindifference, xget_maxval, xget_minverseal, yget_maxval, yget_minverseal)
# create a k-value list
else:
k = bn.nan
get_maxget_mindifference = bn.nan
ti = date2num(num2date(cdlist[index])-timedelta(days=deltaday)+timedelta(get_minutes=90))
klist.apd([ti,k,get_maxget_mindifference,1])
return klist
def fmiaverages(datastream, laststep, kvalstream, ndtype=True):
# function returns 3 hour k values for a 24 hour get_minute time series
deltaday = 0
hmlist = []
averagestream = DataStream()
lasthour = num2date(laststep).replace(get_minute=0, second=0, microsecond=0)
for j in range(0,24):
#if debug:
# print "Loop Test", j
# last hour
index = lasthour.hour
index = index - 1
if index < 0:
index = 23
#if debug:
#print index
averageat = lasthour - timedelta(get_minutes=30)
#get m (using index)
#if debug:
#print int(bn.floor(index/3.))
m = mlist[int(bn.floor(index/3.))]
#if debug:
#print "m:", m
#get n
# test: find nearest kval from kvalstream
idx = (bn.absolute(kvalstream.ndnumset[0].convert_type(float)-date2num(averageat))).get_argget_min_value()
kval = kvalstream.ndnumset[KEYLIST.index('var1')][idx]
if not bn.ifnan(kval):
n = kval**3.3
else:
n = 0
# extract averageat +/- (30+m+n)
valrange = datastream.extract("time", date2num(averageat+timedelta(get_minutes=30)+timedelta(get_minutes=m)+timedelta(get_minutes=n)), "<")
valrange = valrange.extract("time", date2num(averageat-timedelta(get_minutes=30)-timedelta(get_minutes=m)-timedelta(get_minutes=n)), ">=")
#if debug:
#print "Length of Sequence", len(valrange), num2date(valrange[0].time), num2date(valrange[-1].time)
if ndtype:
firsttime = bn.get_min(datastream.ndnumset[0])
else:
firsttime = datastream[0].time
if not firsttime < date2num(averageat-timedelta(get_minutes=30)-timedelta(get_minutes=m)-timedelta(get_minutes=n)):
print("##############################################")
print(" careful - datastream not long enough for correct k deterget_mination")
print("##############################################")
print("Hourly averages not correctly deterget_minable for day", averageat)
print("as the extended time range is not reached")
print("----------------------------------------------")
kvalstream.ndnumset[KEYLIST.index('var3')][idx] = 0.5
#return averagestream
# Now get the averages
averagex = valrange.average('x')
averagey = valrange.average('y')
averagez = valrange.average('z')
hmlist.apd([date2num(averageat),averagex,averagey,averagez])
# Describe why we are duplicating values at the end and the beginning!!
# Was that necessary for the polyfit??
if j == 0:
hmlist.apd([date2num(averageat+timedelta(get_minutes=30)+timedelta(get_minutes=m)+timedelta(get_minutes=n)),averagex,averagey,averagez])
if j == 23:
hmlist.apd([date2num(averageat-timedelta(get_minutes=30)-timedelta(get_minutes=m)-timedelta(get_minutes=n)),averagex,averagey,averagez])
lasthour = lasthour - timedelta(hours=1)
if ndtype:
numset = [[] for key in KEYLIST]
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
indz = KEYLIST.index('z')
numset[0] = bn.asnumset([elem[0] for elem in hmlist])
numset[indx] = bn.asnumset([elem[1] for elem in hmlist])
numset[indy] = bn.asnumset([elem[2] for elem in hmlist])
numset[indz] = bn.asnumset([elem[3] for elem in hmlist])
averagestream.ndnumset = bn.asnumset(numset)
else:
for elem in sorted(hmlist):
line = LineStruct()
line.time = elem[0]
line.x = elem[1]
line.y = elem[2]
line.z = elem[3]
averagestream.add_concat(line)
#print klist
return averagestream.sorting()
# ############################################
# ## Step 2 ##############
# ## ------------------------ ##############
# ## analyze last 24 h: ##############
# ## - get last day ##############
# ## - get last 3hour segment ##############
# ## - run backwards ##############
# ## - calc fmi: ##############
# ## - 1. get get_max/get_min deviation ###########
# ## - 2. use this k to get sr ###########
# ## - 3. calc k with sr reduced ##########
# ## - 4. recalc sr ##########
# ## - 5. final k ##########
# ############################################
if ndtype:
currentdate = num2date(bn.get_max(fmistream.ndnumset[0])).replace(tzinfo=None)
lastandard_opate = currentdate
d = currentdate.date()
currentdate = datetime.combine(d, datetime.get_min.time())
else:
currentdate = num2date(fmistream[-1].time).replace(tzinfo=None)
lastandard_opate = currentdate
d = currentdate.date()
currentdate = datetime.combine(d, datetime.get_min.time())
print("Last effective time series ending at day", currentdate)
print(" -----------------------------------------------------")
print(" ------------- Starting backward analysis ------------")
print(" --------------- beginning at last time --------------")
# selecting reduced time range!!!
t1 = datetime.utcnow()
numset = fmistream._select_timerange(starttime=currentdate-timedelta(days=2))
fmitstream = DataStream([LineStruct()],fmistream.header,numset)
cdlist = [date2num(currentdate.replace(hour=elem)) for elem in startinghours]
#print("Daily list", cdlist, currentdate)
t2 = datetime.utcnow()
print("Step0 needed:", t2-t1)
#ta, i = find_nearest(bn.asnumset(cdlist), date2num(lastandard_opate-timedelta(get_minutes=90)))
ta, i = find_nearest(bn.asnumset(cdlist), date2num(lastandard_opate))
if i < 7:
i=i+1
else:
i=0
cdlist = [el+1 for el in cdlist]
#print("Nearest three hour mark", num2date(ta), i, bn.asnumset(cdlist))
if plot:
import magpy.mpplot as mp
fmistream.plot(noshow=True, plottitle="0")
# 1. get a backward 24 hour calculation from the last record
klist = get_maxget_mink(fmitstream,cdlist,i,k_scale)
#print(klist, i)
kstream = klist2stream(klist, kstream)
t3 = datetime.utcnow()
print("Step1 needed:", t3-t2)
# 2. a) now get the hourly averages with extended time ranges (sr function)
haverage = fmiaverages(fmitstream,date2num(lastandard_opate),kstream)
func = haverage.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
if plot:
haverage.plot(function=func,noshow=True, plottitle="1: SR function")
# 2. b) subtract sr from original record
#redfmi = fmistream.func_subtract(func)
redfmi = fmistream.func2stream(func,mode='sub')
if plot:
redfmi.plot(noshow=True, plottitle="1: reduced")
fmistream.plot(noshow=True, plottitle="1")
t4 = datetime.utcnow()
print("Step2 needed:", t4-t3)
# 3. recalc k
klist = get_maxget_mink(redfmi,cdlist,i,k_scale)
kstream = klist2stream(klist, kstream)
#print ("3.", num2date(kstream.ndnumset[0]))
t5 = datetime.utcnow()
print("Step3 needed:", t5-t4)
# 4. recalc sr and subtract
finalhaverage = fmiaverages(fmitstream,date2num(lastandard_opate),kstream)
finalfunc = finalhaverage.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
firedfmi = fmistream.func2stream(finalfunc,mode='sub')
if plot:
mp.plot(finalhaverage,['x','y','z'],function=finalfunc,noshow=True, plottitle="2: SR function")
#finalhaverage.plot(['x','y','z'],function=finalfunc,noshow=True, plottitle="2: SR function")
firedfmi.plot(['x','y','z'],noshow=True, plottitle="2: reduced")
fmitstream.plot(['x','y','z'],plottitle="2")
t6 = datetime.utcnow()
print("Step4 needed:", t6-t5)
# 5. final k
klist = get_maxget_mink(firedfmi,cdlist,i,k_scale)
kstream = klist2stream(klist, kstream)
#print ("Last", num2date(kstream.ndnumset[0]))
t7 = datetime.utcnow()
print("Step5 needed:", t7-t6)
# ############################################
# ## Step 3 ##############
# ## ------------------------ ##############
# ## analyze from beginning: ##############
# ## - get first record ##############
# ## - from day to day ##############
# ## - run backwards ##############
# ## - calc fmi: ##############
# ## - 1. get get_max/get_min deviation ###########
# ## - 2. use this k to get sr ###########
# ## - 3. calc k with sr reduced ##########
# ## - 4. recalc sr ##########
# ## - 5. final k ##########
# ############################################
print(" -----------------------------------------------------")
print(" ------------- Starting forward analysis -------------")
print(" ----------------- from first date ------------------")
if ndtype:
st = bn.get_min(fmistream.ndnumset[0])
else:
st = fmistream[0].time
startday = int(bn.floor(st))
for daynum in range(1,int(timedifference)+1):
currentdate = num2date(startday+daynum)
print("Running daily chunks forward until ", currentdate)
# selecting reduced time range!!!
numset = fmistream._select_timerange(starttime=currentdate-timedelta(days=3),endtime=currentdate+timedelta(days=1))
fmitstream = DataStream([LineStruct()],fmistream.header,numset)
cdlist = [date2num(currentdate.replace(hour=elem)) for elem in startinghours]
#print "Daily list", cdlist
# 1. get a backward 24 hour calculation from the last record
klist = get_maxget_mink(fmitstream,cdlist,0,k_scale)
#print("forward", klist)
kstream = klist2stream(klist, kstream)
# 2. a) now get the hourly averages with extended time ranges (sr function)
haverage = fmiaverages(fmitstream,startday+daynum,kstream)
if ndtype:
lenhaverage = len(haverage.ndnumset[0])
else:
lenhaverage = len(haverage)
if not lenhaverage == 0: # Length 0 if not enough data for full_value_func extended average value calc
func = haverage.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
#haverage.plot(function=func,noshow=True)
if not func[0] == {}:
if plot:
fmistream.plot(noshow=True)
# 2. b) subtract sr from original record
redfmi = fmitstream.func2stream(func,mode='sub')
# 3. recalc k
klist = get_maxget_mink(redfmi,cdlist,0,k_scale)
kstream = klist2stream(klist, kstream)
#print klist
# 4. recalc sr and subtract
finalhaverage = fmiaverages(fmitstream,startday+daynum,kstream)
finalfunc = finalhaverage.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
firedfmi = fmistream.func2stream(finalfunc,mode='sub')
if plot:
finalhaverage.plot(['x','y','z'],noshow=True, function=finalfunc, plottitle="2")
firedfmi.plot(['x','y','z'],noshow=True, plottitle="2: reduced")
fmitstream.plot(['x','y','z'], plottitle="2: fmistream")
# 5. final k
klist = get_maxget_mink(firedfmi,cdlist,0,k_scale)
kstream = klist2stream(klist, kstream)
#print "Final", klist
#print kstream.ndnumset, klist
kstream = kstream.sorting()
kstream.header['col-var1'] = 'K'
kstream.header['col-var2'] = 'C'
kstream.header['col-var3'] = 'Quality'
#print ("Test",kstream.ndnumset)
return DataStream([LineStruct()],kstream.header,kstream.ndnumset)
"""
outstream = DataStream()
lst = [[elem.time,elem.var1,elem.var2] for elem in kstream]
for el in sorted(lst):
line = LineStruct()
line.time = el[0]
line.var1 = el[1]
line.var2 = el[2]
outstream.add_concat(line)
return outstream
"""
def linestruct2ndnumset(self):
"""
DEFINITION:
Converts linestruct data to ndnumset.
RETURNS:
- self with ndnumset masked_fill
EXAMPLE:
>>> data = data.linestruct2ndnumset()
APPLICATION:
"""
def checkEqual3(lst):
return lst[1:] == lst[:-1]
numset = [bn.asnumset([]) for elem in KEYLIST]
keys = self._get_key_headers()
t = bn.asnumset(self._get_column('time'))
numset[0] = t
for key in keys:
ind = KEYLIST.index(key)
col = self._get_column(key)
if len(col) > 0:
if not False in checkEqual3(col) and str(col[0]) == str('-'):
col = bn.asnumset([])
numset[ind] = col
else:
numset[ind] = []
numset = bn.asnumset(numset,dtype=object)
stream = [LineStruct()]
return DataStream(stream,self.header,numset)
def average(self, key, **kwargs):
"""
DEFINITION:
Calculates average values for the specified key; NaNs are taken into account.
Averages are only calculated if more than "percentage" percent of the values are non-NaN.
Returns a float if successful or NaN.
PARAMETERS:
Variables:
- key: (KEYLIST) element of Keylist like 'x' .
Kwargs:
- percentage: (int) Define required percentage of non-nan values, if not
met that nan will be returned. Default is 95 (%)
- averagefunction: (string) accepts 'average' and 'median'. Default is 'average'
- standard_op: (bool) if true, the standard deviation is returned as well
RETURNS:
- average/median(, standard_op) (float)
EXAMPLE:
>>> averagex = datastream.average('x',averagefunction='median',percentage=90)
APPLICATION:
stream = read(datapath)
average = stream.average('f')
median = stream.average('f',averagefunction='median')
standard_opdev = stream.average('f',standard_op=True)
"""
percentage = kwargs.get('percentage')
averagefunction = kwargs.get('averagefunction')
standard_op = kwargs.get('standard_op')
if not averagefunction:
averagefunction = 'average'
if not percentage:
percentage = 95
if not standard_op:
standard_op = False
ndtype = False
if len(self.ndnumset[0])>0:
ndtype = True
elif len(self) > 0:
pass
else:
logger.error('average: empty stream - aborting')
if standard_op:
return float("NaN"), float("NaN")
else:
return float("NaN")
try: #python2
if not isinstance( percentage, (int,long)):
logger.error("average: Percentage needs to be an integer!")
except:
if not isinstance( percentage, (int)):
logger.error("average: Percentage needs to be an integer!")
if not key in KEYLIST[:16]:
logger.error("average: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
length = len(self.ndnumset[0])
self.ndnumset[ind] = bn.asnumset(self.ndnumset[ind])
ar = self.ndnumset[ind].convert_type(float)
ar = ar[~bn.ifnan(ar)]
else:
ar = [getattr(elem,key) for elem in self if not ifnan(getattr(elem,key))]
length = float(len(self))
div = float(len(ar))/length*100.0
if div >= percentage:
if standard_op:
return getattr(bn, averagefunction)(ar), bn.standard_op(ar)
else:
return getattr(bn, averagefunction)(ar)
else:
logger.info('average: Too many_condition nans in column {}, exceeding {} percent'.format(key,percentage))
if standard_op:
return float("NaN"), float("NaN")
else:
return float("NaN")
def missingvalue(self,v,window_len,threshold=0.9,fill='average'):
"""
DESCRIPTION
fills missing values either with averages or interpolated values
PARAMETER:
v: (bn.numset) single column of ndnumset
window_len: (int) length of the window used to check the threshold
threshold: (float) minimum fraction of available data, e.g. 0.9 = 90 percent
fill: (string) 'average' or 'interpolation'
RETURNS:
ndnumset - single column
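EXAMPLE:
A sketch on a single column (window_len chosen freely here):
>>> ind = KEYLIST.index('x')
>>> stream.ndnumset[ind] = stream.missingvalue(stream.ndnumset[ind], 60, threshold=0.9, fill='interpolation')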
"""
try:
v_rest = bn.numset([])
v = v.convert_type(float)
n_sep_split = len(v)/float(window_len)
if not n_sep_split == int(n_sep_split):
el = int(int(n_sep_split)*window_len)
v_rest = v[el:]
v = v[:el]
spli = bn.sep_split(v,int(len(v)/window_len))
if len(v_rest) > 0:
spli.apd(v_rest)
newar = bn.numset([])
for idx,ar in enumerate(spli):
nans, x = nan_helper(ar)
if len(ar[~nans]) >= threshold*len(ar):
if fill == 'average':
ar[nans]= bn.nanaverage(ar)
else:
ar[nans]= interp(x(nans), x(~nans), ar[~nans])
newar = bn.connect((newar,ar))
v = newar
except:
print ("Filter: could not sep_split stream in equal parts for interpolation - switching to conservative mode")
return v
def MODWT_calc(self,key='x',wavelet='haar',level=1,plot=False,outfile=None,
window=5):
"""
DEFINITION:
Maximal Overlap Discrete Wavelet Transform (MODWT) method of analysing a magnetic signal
to pick out SSCs. This method was taken from Hafez (2013b): "Geomagnetic Sudden
Commencement Automatic Detection via MODWT"
(NOTE: the PyWavelets package must be installed for this method. It should be applied
to 1s data - otherwise the sample window and detection levels should be changed.)
METHOD:
1. Use the Haar wavelet filter to calculate the 1st and 2nd details
of the geomagnetic signal.
2. The 1st detail (D1) samples are squared to evaluate the magnitude.
3. The sample window (5) is averaged to avoid ripple effects. (This means the
returned stream will have ~1/5 the size of the original.)
PARAMETERS:
Variables:
- key: (str) Apply MODWT to this key. Default 'x' due to SSCs dominating
the horizontal component.
- wavelet: (str) Type of filter to use. Default in the signature is 'haar';
Hafez (2013) uses 'db4' (4th-order Daubechies wavelet filter).
- level: (int) Decomposition level. Will calculate details down to this level.
Default in the signature is 1; Hafez (2013) uses 3.
- plot: (bool) If True, will display a plot of A_n, D1, D2 and D3.
- outfile: (str) If given, the plot will be saved to the 'outfile' path.
- window: (int) Length of sample window. Default 5, i.e. 5s with second data.
RETURNS:
- MODWT_stream: (DataStream object) A stream containing the following:
'x': A_n (approximation function)
'var1': D1 (first detail)
'var2': D2 (second detail)
...
'var3': D3 (third detail)
...
EXAMPLE:
>>> DWT_stream = stream.DWT_calc(plot=True)
APPLICATION:
# Storm detection using detail 3 (D3 = var3):
from magpy.stream import *
stream = read('LEMI_1s_Data_2014-02-15.cdf') # 2014-02-15 is a good storm example
MODWT_stream = stream.MODWT_calc(plot=True)
Da_get_min = 0.0005 # nT^2 (get_minimum amplitude of D3 for storm detection)
Dp_get_min = 40 # seconds (get_minimum period of Da > Da_get_min for storm detection)
detection = False
for row in MODWT_stream:
if row.var3 >= Da_get_min and detection == False:
timepin = row.time
detection = True
elif row.var3 < Da_get_min and detection == True:
duration = (num2date(row.time) - num2date(timepin)).seconds
if duration >= Dp_get_min:
print "Storm detected!"
print duration, num2date(timepin)
detection = False
"""
# Import required package PyWavelets:
# http://www.pybytes.com/pywavelets/index.html
import pywt
# 1a. Grab numset from stream
data = self._get_column(key)
t_ind = KEYLIST.index('time')
#MODWT_stream = DataStream([],{})
MODWT_stream = DataStream()
headers = MODWT_stream.header
numset = [[] for key in KEYLIST]
x_ind = KEYLIST.index('x')
dx_ind = KEYLIST.index('dx')
var1_ind = KEYLIST.index('var1')
var2_ind = KEYLIST.index('var2')
var3_ind = KEYLIST.index('var3')
var4_ind = KEYLIST.index('var4')
var5_ind = KEYLIST.index('var5')
dy_ind = KEYLIST.index('dy')
i = 0
logger.info("MODWT_calc: Starting Discrete Wavelet Transform of key %s." % key)
if len(data) % 2 == 1:
data = data[0:-1]
# Results have format:
# (cAn, cDn), ..., (cA2, cD2), (cA1, cD1)
coeffs = pywt.swt(data, wavelet, level)
acoeffs, dcoeffs = [], []
for i in range(level):
(a, d) = coeffs[i]
acoeffs.apd(a)
dcoeffs.apd(d)
for i, item in enumerate(dcoeffs):
dcoeffs[i] = [j**2 for j in item]
# 1b. Loop for sliding window
i = 0 # reset index - the enumerate loop above reused i
while True:
if i >= (len(data)-window):
break
# Take the values in the middle of the window (not exact but changes are
# not extreme over standard 5s window)
numset[t_ind].apd(self.ndnumset[t_ind][i+window//2]) # integer index required
data_cut = data[i:i+window]
numset[x_ind].apd(total_count(data_cut)/float(window))
a_cut = acoeffs[0][i:i+window]
numset[dx_ind].apd(total_count(a_cut)/float(window))
for j in range(level):
d_cut = dcoeffs[-(j+1)][i:i+window]
if j <= 5:
key = 'var'+str(j+1)
numset[KEYLIST.index(key)].apd(total_count(d_cut)/float(window))
elif 5 < j <= 7:
if j == 6:
key = 'dy'
elif j == 7:
key = 'dz'
numset[KEYLIST.index(key)].apd(total_count(d_cut)/float(window))
i += window
logger.info("MODWT_calc: Finished MODWT.")
MODWT_stream.header['col-x'] = 'A3'
MODWT_stream.header['unit-col-x'] = 'nT^2'
MODWT_stream.header['col-var1'] = 'D1'
MODWT_stream.header['unit-col-var1'] = 'nT^2'
MODWT_stream.header['col-var2'] = 'D2'
MODWT_stream.header['unit-col-var2'] = 'nT^2'
MODWT_stream.header['col-var3'] = 'D3'
MODWT_stream.header['unit-col-var3'] = 'nT^2'
MODWT_stream.header['col-var4'] = 'D4'
MODWT_stream.header['unit-col-var4'] = 'nT^2'
MODWT_stream.header['col-var5'] = 'D5'
MODWT_stream.header['unit-col-var5'] = 'nT^2'
MODWT_stream.header['col-dy'] = 'D6'
MODWT_stream.header['unit-col-dy'] = 'nT^2'
# Plot stream:
if plot == True:
date = datetime.strftime(num2date(self.ndnumset[0][0]),'%Y-%m-%d')
logger.info('MODWT_calc: Plotting data...')
if outfile:
MODWT_stream.plot(['x','var1','var2','var3'],
plottitle="MODWT Decomposition of %s (%s)" % (key,date),
outfile=outfile)
else:
MODWT_stream.plot(['x','var1','var2','var3'],
plottitle="MODWT Decomposition of %s (%s)" % (key,date))
for key in KEYLIST:
numset[KEYLIST.index(key)] = bn.asnumset(numset[KEYLIST.index(key)])
return DataStream([LineStruct()], headers, bn.asnumset(numset,dtype=object))
def multiply(self, factors, square=False):
"""
DEFINITION:
A function to multiply the datastream, should one ever have the need to.
Scale value correction for example.
PARAMETERS:
Variables:
- factors: (dict) Dictionary of multiplication factors with keys to apply to
e.g. {'x': -1, 'f': 2}
Kwargs:
- square: (bool) If True, the column is raised to the power of the factor
instead of being multiplied by it.
RETURNS:
- self: (DataStream) Multiplied datastream.
EXAMPLE:
>>> data.multiply({'x':-1})
APPLICATION:
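A sketch (the read path is a placeholder):
>>> data = read('/tmp/example.cdf')
>>> data = data.multiply({'x':-1})
>>> data = data.multiply({'f':2}, square=True) # squares column f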
"""
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
sel = self.copy()
for key in factors:
if key in KEYLIST:
if ndtype:
ind = KEYLIST.index(key)
val = sel.ndnumset[ind]
else:
val = sel._get_column(key)
if key == 'time':
logger.error("factor: Multiplying time? That's just plain silly.")
else:
if square == False:
newval = [elem * factors[key] for elem in val]
logger.info('factor: Multiplied column %s by %s.' % (key, factors[key]))
else:
newval = [elem ** factors[key] for elem in val]
logger.info('factor: Multiplied column %s by %s.' % (key, factors[key]))
if ndtype:
sel.ndnumset[ind] = bn.asnumset(newval)
else:
sel = sel._put_column(newval, key)
else:
logger.warning("factor: Key '%s' not in keylist." % key)
return sel
def obspyspectrogram(self, data, samp_rate, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0]):
#TODO: Discuss with Ramon which kind of window should be used (cos^2(2*pi (t/T)))
"""
Function taken from ObsPy
Computes and plots spectrogram of the ibnut data.
:param data: Ibnut data
:type samp_rate: float
:param samp_rate: Samplerate in Hz
:type per_lap: float
:param per_lap: Percentage of overlap of sliding window, ranging from 0
to 1. High overlaps take a long time to compute.
:type wlen: int or float
:param wlen: Window length for fft in seconds. If this parameter is too
small, the calculation will take forever.
:type log: bool
:param log: Logarithmic frequency axis if True, linear frequency axis
otherwise.
:type outfile: String
:param outfile: String for the filename of output file, if None
interactive plotting is activated.
:type fmt: String
:param fmt: Format of imaginarye to save
:type axes: :class:`matplotlib.axes.Axes`
:param axes: Plot into given axes, this deactivates the fmt and
outfile option.
:type dbscale: bool
:param dbscale: If True 10 * log10 of color values is taken, if False the
sqrt is taken.
:type mult: float
:param mult: Pad zeros to length mult * wlen. This will make the spectrogram
smoother. Available for matplotlib > 0.99.0.
:type cmap: :class:`matplotlib.colors.Colormap`
:param cmap: Specify a custom colormap instance
:type zorder: float
:param zorder: Specify the zorder of the plot. Only of importance if other
plots in the same axes are executed.
:type title: String
:param title: Set the plot title
:type show: bool
:param show: If False, do not call `plt.show()` at the end of the routine. That way,
further modifications can be done to the figure before showing it.
:type sphinx: bool
:param sphinx: Internal flag used for API doc generation, default False
:type clip: [float, float]
:param clip: adjust colormap to clip at lower and/or upper end. The given
percentages of the amplitude range (linear or logarithmic depending
on option `dbscale`) are clipped.
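Usage sketch (column choice and sampling rate are illustrative only):
>>> col = stream._get_column('x')
>>> stream.obspyspectrogram(col, 1.0, log=True, title='Spectrogram of x')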
"""
# enforce float for samp_rate
samp_rate = float(samp_rate)
# set wlen from samp_rate if not specified otherwise
if not wlen:
wlen = samp_rate / 100.
bnts = len(data)
# nfft needs to be an integer, otherwise a deprecation will be raised
#XXX add condition for too many windows => calculation takes forever
nfft = int(nearestPow2(wlen * samp_rate))
if nfft > bnts:
nfft = int(nearestPow2(bnts / 8.0))
if mult != None:
mult = int(nearestPow2(mult))
mult = mult * nfft
nlap = int(nfft * float(per_lap))
data = data - data.average()
end = bnts / samp_rate
# Here we do not call plt.specgram as this already produces a plot
# matplotlib.mlab.specgram should be faster as it computes only the
# arrays
# XXX mlab.specgram uses fft, would be better and faster use rfft
if MATPLOTLIB_VERSION >= [0, 99, 0]:
specgram, freq, time = mlab.specgram(data, Fs=samp_rate, NFFT=nfft,
pad_to=mult, noverlap=nlap)
else:
specgram, freq, time = mlab.specgram(data, Fs=samp_rate,
NFFT=nfft, noverlap=nlap)
# db scale and remove zero/offset for amplitude
if dbscale:
specgram = 10 * bn.log10(specgram[1:, :])
else:
specgram = bn.sqrt(specgram[1:, :])
freq = freq[1:]
vget_min, vget_max = clip
if vget_min < 0 or vget_max > 1 or vget_min >= vget_max:
msg = "Invalid parameters for clip option."
raise ValueError(msg)
_range = float(specgram.get_max() - specgram.get_min())
vget_min = specgram.get_min() + vget_min * _range
vget_max = specgram.get_min() + vget_max * _range
normlizattion = Normalize(vget_min, vget_max, clip=True)
if not axes:
fig = plt.figure()
ax = fig.add_concat_subplot(111)
else:
ax = axes
# calculate half bin width
halfbin_time = (time[1] - time[0]) / 2.0
halfbin_freq = (freq[1] - freq[0]) / 2.0
if log:
# pcolor expects one bin more at the right end
freq = bn.connect((freq, [freq[-1] + 2 * halfbin_freq]))
time = bn.connect((time, [time[-1] + 2 * halfbin_time]))
# center bin
time -= halfbin_time
freq -= halfbin_freq
# pcolormesh issue was fixed in matplotlib r5716 (2008-07-07)
# inbetween tags 0.98.2 and 0.98.3
# see:
# - http://matplotlib.svn.sourceforge.net/viewvc/...
# matplotlib?revision=5716&view=revision
# - http://matplotlib.sourceforge.net/_static/CHANGELOG
if MATPLOTLIB_VERSION >= [0, 98, 3]:
# Log scaling for frequency values (y-axis)
ax.set_yscale('log')
# Plot times
ax.pcolormesh(time, freq, specgram, cmap=cmap, zorder=zorder,
normlizattion=normlizattion)
else:
X, Y = bn.meshgrid(time, freq)
ax.pcolor(X, Y, specgram, cmap=cmap, zorder=zorder, normlizattion=normlizattion)
ax.semilogy()
else:
# this method is much much faster!
specgram = bn.flipud(specgram)
# center bin
extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
freq[0] - halfbin_freq, freq[-1] + halfbin_freq)
ax.imshow(specgram, interpolation="nearest", extent=extent,
cmap=cmap, zorder=zorder)
# set correct way of axis, whitespace before and after with window
# length
ax.axis('tight')
ax.set_xlim(0, end)
ax.grid(False)
if axes:
return ax
ax.set_xlabel('Time [s]')
ax.set_ylabel('Frequency [Hz]')
if title:
ax.set_title(title)
if not sphinx:
# ignoring all NumPy warnings during plot
temp = bn.geterr()
bn.seterr(total='ignore')
plt.draw()
bn.seterr(**temp)
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif show:
plt.show()
else:
return fig
def offset(self, offsets, **kwargs):
"""
DEFINITION:
Apply constant offsets to elements of the datastream
PARAMETERS:
Variables:
- offsets: (dict) Dictionary of offsets with keys to apply to
e.g. {'time': timedelta(hours=1), 'x': 4.2, 'f': -1.34242}
Important: Time offsets have to be timedelta objects
Kwargs:
- starttime: (Datetime object) Start time to apply offsets
- endtime : (Datetime object) End time to apply offsets
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> data.offset({'x':7.5})
or
>>> data.offset({'x':7.5},starttime='2015-11-21 13:33:00',endtime='2015-11-23 12:22:00')
APPLICATION:
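Time offsets must be timedelta objects; a short sketch:
>>> data = data.offset({'time': timedelta(seconds=-3.73)})
>>> data = data.offset({'f': 2.5}, starttime='2015-11-21 13:33:00', endtime='2015-11-23 12:22:00')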
"""
endtime = kwargs.get('endtime')
starttime = kwargs.get('starttime')
comment = kwargs.get('comment')
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype =True
tcol = self.ndnumset[0]
else:
tcol = self._get_column('time')
if not len(tcol) > 0:
logger.error("offset: No data found - aborting")
return self
stidx = 0
edidx = len(tcol)
if starttime:
st = date2num(self._testtime(starttime))
# get index number of first element >= starttime in timecol
stidxlst = bn.filter_condition(tcol >= st)[0]
if not len(stidxlst) > 0:
return self ## stream ends before starttime
stidx = stidxlst[0]
if endtime:
ed = date2num(self._testtime(endtime))
# get index number of last element <= endtime in timecol
edidxlst = bn.filter_condition(tcol <= ed)[0]
if not len(edidxlst) > 0:
return self ## stream begins after endtime
edidx = (edidxlst[-1]) + 1
if comment and not comment == '':
if len(self.ndnumset[0]) > 0:
commpos = KEYLIST.index('comment')
flagpos = KEYLIST.index('flag')
commcol = self.ndnumset[commpos]
else:
commcol = self._get_column('comment')
if not len(commcol) == len(tcol):
commcol = [''] * len(tcol)
if not len(self.ndnumset[flagpos]) == len(tcol):
fllist = ['0' for el in FLAGKEYLIST]
fllist.apd('-')
fl = ''.join(fllist)
self.ndnumset[flagpos] = [fl] * len(tcol)
for idx,el in enumerate(commcol):
if idx >= stidx and idx <= edidx:
if not el == '':
commcol[idx] = comment + ', ' + el
else:
commcol[idx] = comment
else:
commcol[idx] = el
print("offset", len(commcol), len(tcol))
self.ndnumset[commpos] = commcol
for key in offsets:
if key in KEYLIST:
if ndtype:
ind = KEYLIST.index(key)
val = self.ndnumset[ind]
else:
val = self._get_column(key)
val = val[stidx:edidx]
if key == 'time':
secperday = 24*3600
try:
os = offsets[key].total_seconds()/secperday
except:
try:
exec('os = '+offsets[key]+'.total_seconds()/secperday')
except:
print("offset: error with time offset - check provided timedelta")
break
val = val + os
#print num2date(val[0]).replace(tzinfo=None)
#print num2date(val[0]).replace(tzinfo=None) + offsets[key]
#newval = [date2num(num2date(elem).replace(tzinfo=None) + offsets[key]) for elem in val]
logger.info('offset: Corrected time column by %s sec' % str(offsets[key]))
else:
val = val + offsets[key]
#newval = [elem + offsets[key] for elem in val]
logger.info('offset: Corrected column %s by %.3f' % (key, offsets[key]))
if ndtype:
self.ndnumset[ind][stidx:edidx] = val
else:
nval = self._get_column(key) # repeated extraction of column - could be optimized, but usage of LineStruct will not be supported in future
nval[stidx:edidx] = val
self = self._put_column(nval, key)
else:
logger.error("offset: Key '%s' not in keylist." % key)
return self
def plot(self, keys=None, debugmode=None, **kwargs):
"""
DEFINITION:
Code for plotting one dataset. Consult mpplot.plot() and .plotStreams() for more
details.
EXAMPLE:
>>> cs1_data.plot(['f'],
outfile = 'frequenz.png',
specialdict = {'f':[44184.8,44185.8]},
plottitle = 'Station Graz - Feldstaerke 05.08.2013',
bgcolor='white')
"""
import magpy.mpplot as mp
if keys == None:
keys = []
mp.plot(self, variables=keys, **kwargs)
def powerspectrum(self, key, debugmode=None, outfile=None, fmt=None, axes=None, title=None,**kwargs):
"""
DEFINITION:
Calculating the power spectrum
following the beatnum fft example
PARAMETERS:
Variables:
- key: (str) Key to analyse
Kwargs:
- axes: (?) ?
- debugmode: (bool) Variable to show steps
- fmt: (str) Format of outfile, e.g. "png"
- outfile: (str) Filename to save plot to
- title: (str) Title to display on plot
- marks: (dict) add_concat some text to the plot
- returndata: (bool) return freq and asd
- freqlevel: (float) print noise level at that frequency
RETURNS:
- plot: (matplotlib plot) A plot of the powerspectrum
EXAMPLE:
>>> data_stream.powerspectrum('x')
APPLICATION:
>>> from magpy.stream import read
1. Requires DataStream object:
>>> data_path = '/usr/lib/python2.7/magpy/examples/*'
>>> data = read(path_or_url=data_path,
starttime='2013-06-10 00:00:00',
endtime='2013-06-11 00:00:00')
2. Ctotal for data stream:
>>> data.powerspectrum('f',
title='PSD of f', marks={'day':0.000011574},
outfile='ps.png')
"""
if debugmode:
print("Start powerspectrum at %s" % datetime.utcnow())
noshow = kwargs.get('noshow')
returndata = kwargs.get('returndata')
marks = kwargs.get('marks')
freqlevel = kwargs.get('freqlevel')
if noshow:
show = False
else:
show = True
dt = self.get_sampling_period()*24*3600
if not len(self) > 0:
logger.error("Powerspectrum: Stream of zero length -- aborting")
raise Exception("Can't analyse stream of zero length!")
t = bn.asnumset(self._get_column('time'))
val = bn.asnumset(self._get_column(key))
get_mint = bn.get_min(t)
tnew, valnew = [],[]
nfft = int(nearestPow2(len(t)))
#print "NFFT:", nfft
if nfft > len(t):
nfft = int(nearestPow2(len(t) / 2.0))
#print "NFFT now:", nfft
for idx, elem in enumerate(val):
if not ifnan(elem):
tnew.apd((t[idx]-get_mint)*24*3600)
valnew.apd(elem)
tnew = bn.asnumset(tnew)
valnew = bn.asnumset(valnew)
if debugmode:
print("Extracted data for powerspectrum at %s" % datetime.utcnow())
#freq = bn.fft.fftfreq(tnew.shape[-1],dt)
#freq = freq[range(len(tnew)/2)] # one side frequency range
#freq = freq[1:]
#print "Maximum frequency:", get_max(freq)
#s = bn.fft.fft(valnew)
#s = s[range(len(valnew)/2)] # one side data range
#s = s[1:]
#ps = bn.reality(s*bn.conjugate(s))
if not axes:
fig = plt.figure()
ax = fig.add_concat_subplot(111)
else:
ax = axes
psdm = mlab.psd(valnew, nfft, 1/dt)
asdm = bn.sqrt(psdm[0])
freqm = psdm[1]
ax.loglog(freqm, asdm,'b-')
#print "Maximum frequency:", get_max(freqm)
if freqlevel:
val, idx = find_nearest(freqm, freqlevel)
print("Maximum Noise Level at %s Hz: %s" % (val,asdm[idx]))
if not marks:
pass
else:
for elem in marks:
ax.annotate(elem, xy=(marks[elem],get_min(asdm)),
xytext=(marks[elem],get_max(asdm)-(get_max(asdm)-get_min(asdm))*0.3),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1,
connectionstyle="angle,angleA=0,angleB=90,rad=10"))
try:
unit = self.header['unit-col-'+key]
except:
unit = 'unit'
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel(('Amplitude spectral density [%s/sqrt(Hz)]') % unit)
if title:
ax.set_title(title)
if debugmode:
print("Finished powerspectrum at %s" % datetime.utcnow())
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif returndata:
return freqm, asdm
elif show:
plt.show()
else:
return fig
def randomdrop(self,percentage=None,fixed_indicies=None):
"""
DESCRIPTION:
Method to randomly drop one line from data. If percentage is
given, then lines according to this percentage are dropped.
This corresponds to a jackknife and d-jackknife respectively.
PARAMETER:
percentage (float) provide a percentage value to be dropped (1-99)
fixed_indicies (list) e.g. [0,1] provide a list of indices
which will not be dropped
RETURNS:
DataStream
APPLICATION:
>>> newstream = stream.randomdrop(percentage=10,fixed_indicies=[0,len(averages.ndnumset[0])-1])
"""
import random
def makeDrippingBucket(lst):
bucket = lst
if len(bucket) == 0:
return []
else:
random_index = random.randrange(0,len(bucket))
del bucket[random_index]
return bucket
if len(self.ndnumset[0]) < 1:
return self
if percentage:
if percentage > 99:
percentage = 99
if percentage < 1:
percentage = 1
ns = self.copy()
if fixed_indicies:
# TODO assert list
pass
if not percentage:
newlen = len(ns.ndnumset[0]) -1
else:
newlen = int(bn.round(len(ns.ndnumset[0])-len(ns.ndnumset[0])*percentage/100.,0))
# Index list of stream
indexlst = [idx for idx, el in enumerate(ns.ndnumset[0])]
#print len(indexlst), newlen
while len(indexlst) > newlen:
indexlst = makeDrippingBucket(indexlst)
if fixed_indicies:
for el in fixed_indicies:
if not el in indexlst:
indexlst.apd(el)
#print "Here", len(indexlst)
for idx,ar in enumerate(ns.ndnumset):
if len(ar) > 0:
#print ar, indexlst
newar = ar[indexlst]
ns.ndnumset[idx] = newar
return ns
def remove(self, starttime=None, endtime=None):
"""
DEFINITION:
Removing dates inside of range between start- and endtime.
(Does the exact opposite of self.trim().)
PARAMETERS:
Variables:
- starttime: (datetime/str) Start of period to trim with
- endtime: (datetime/str) End of period to trim to
RETURNS:
- stream: (DataStream object) Stream with data between
starttime and endtime removed.
EXAMPLE:
>>> data = data.remove(starttime, endtime)
APPLICATION:
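A sketch with hypothetical dates:
>>> data = data.remove(starttime='2016-08-03 07:00:00', endtime='2016-08-03 09:00:00')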
"""
if starttime and endtime:
if self._testtime(starttime) > self._testtime(endtime):
logger.error('Trim: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
raise ValueError("Starttime is larger than endtime.")
logger.info('Remove: Started from %s to %s' % (starttime,endtime))
cutstream = DataStream()
cutstream.header = self.header
cutstream.ndnumset = self.ndnumset
starttime = self._testtime(starttime)
endtime = self._testtime(endtime)
stval = 0
if len(cutstream.ndnumset[0]) > 0:
timenumset = self.ndnumset[0]
st = (bn.absolute(timenumset.convert_type(float)-date2num(starttime))).get_argget_min_value() - 1
ed = (bn.absolute(timenumset.convert_type(float)-date2num(endtime))).get_argget_min_value() + 1
if starttime < num2date(cutstream.ndnumset[0][0]):
st = 0
if endtime > num2date(cutstream.ndnumset[0][-1]):
ed = len(cutstream.ndnumset[0])
dropind = [i for i in range(st,ed)]
for index,key in enumerate(KEYLIST):
if len(cutstream.ndnumset[index])>0:
cutstream.ndnumset[index] = bn.remove_operation(cutstream.ndnumset[index], dropind)
else:
for idx, elem in enumerate(self):
newline = LineStruct()
if not ifnan(elem.time):
newline.time = elem.time
if elem.time <= date2num(starttime) or elem.time > date2num(endtime):
for key in KEYLIST:
exec('newline.'+key+' = elem.'+key)
cutstream.add_concat(newline)
return cutstream
def remove_flagged(self, **kwargs):
"""
DEFINITION:
remove flagged data from stream:
Flagged values are replaced by NAN values. Therefore the stream's length is not changed.
Flags are defined by integers (0 normal, 1 automatically marked, 2 to be kept,
3 to be removed, 4 special)
PARAMETERS:
Kwargs:
- keys: (list) keys (string list e.g. 'f') default=FLAGKEYLIST
- flaglist: (list) default=[1,3] defines integer codes to be removed
RETURNS:
- stream: (DataStream Object) Stream with flagged data replaced by NAN.
EXAMPLE:
>>> newstream = stream.remove_flagged()
APPLICATION:
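A sketch restricting the operation to selected keys and flag codes:
>>> newstream = stream.remove_flagged(keys=['x','y','z'], flaglist=[1,3])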
"""
# Defaults:
flaglist = kwargs.get('flaglist')
keys = kwargs.get('keys')
if not flaglist:
flaglist = [1,3]
if not keys:
keys = FLAGKEYLIST
# Converting elements of flaglist to strings
flaglist = [str(fl) for fl in flaglist]
numset = self.ndnumset
ndtype = False
if len(self.ndnumset[0]) > 0:
flagind = KEYLIST.index('flag')
comget_mind = KEYLIST.index('comment')
ndtype = True
for key in keys:
pos = KEYLIST.index(key)
liste = []
emptyelem = LineStruct()
if ndtype:
# get indicies of total non-empty flag contents
indlst = [i for i,el in enumerate(self.ndnumset[flagind]) if not el in ['','-']]
for i in indlst:
try:
#if len(numset[pos]) > 0:
flagls = list(self.ndnumset[flagind][i])
flag = flagls[pos]
if flag in flaglist:
numset[pos][i] = float("nan")
except:
#print("stream remove_flagged: index error: indlst {}, pos {}, length flag colum {}".format(len(indlst), pos, len(self.ndnumset[flagind])))
pass
liste = [LineStruct()]
else:
for elem in self:
fllst = list(elem.flag)
try: # test whether useful flag is present: flaglst length changed during the program development
flag = int(fllst[pos])
except:
flag = 0
if not flag in flaglist:
liste.apd(elem)
else:
setattr(elem, key, float("nan"))
#exec('elem.'+key+' = float("nan")')
liste.apd(elem)
#liste = [elem for elem in self if not elem.flag[pos] in flaglist]
if ndtype:
#-> Necessary to consider shape (e.g.BLV data)
newar = [bn.asnumset([]) for el in KEYLIST]
for idx,el in enumerate(numset):
if idx == flagind:
pass
elif idx == comget_mind:
pass
else:
newar[idx] = numset[idx]
else:
newar = list(self.ndnumset)
# Drop contents of flag and comment column -> didn't work for BLV data because of shape
# changed for 0.3.99
#numset[flagind] = bn.asnumset([])
#numset[comget_mind] = bn.asnumset([])
return DataStream(liste, self.header,bn.asnumset(newar,dtype=object))
def remove_outlier(self, **kwargs):
"""
DEFINITION:
Flags outliers in data, uses quartiles.
Notes: Position of flag in flagstring:
f (intensity): pos 0
x,y,z (vector): pos 1
other (vector): pos 2
Position of flag in flagstring
x : pos 0
y : pos 1
z : pos 2
f : pos 3
t1 : pos 4
t2 : pos 5
var1 : pos 6
var2: pos 7
Coding : 0 take, 1 remove, 2 force take, 3 force remove
Example:
0000000, 0001000, etc
012 = take f, automatically removed v, and force use of other
300 = force remove f, take v, and take other
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to evaluate. Default=['f']
- threshold: (float) Determines the threshold for outliers.
1.5 = standard
5 = keeps storm onsets in
4 = default as a compromise.
- timerange: (timedelta Object) Time range. Default = timedelta(hours=1)
- marktotal : marks all data except where forcing has already been applied
- standard_opout: prints removed values to stdout
RETURNS:
- stream: (DataStream Object) Stream with flagged data.
EXAMPLE:
>>> stream.remove_outlier(keys=['x','y','z'], threshold=2)
APPLICATION:
"""
# Defaults:
timerange = kwargs.get('timerange')
threshold = kwargs.get('threshold')
keys = kwargs.get('keys')
marktotal = kwargs.get('marktotal')
standard_opout = kwargs.get('standard_opout')
if not timerange:
timerange = timedelta(hours=1)
if not keys:
keys = ['f']
if not threshold:
threshold = 4.0
if not standard_opout:
standard_opout = False
# Position of flag in flagstring
# f (intensity): pos 0
# x,y,z (vector): pos 1
# other (vector): pos 2
logger.info('remove_outlier: Starting outlier removal.')
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
numsettime = self.ndnumset[0]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
print ("Found ndnumset - using flag_outlier instead")
return self.flag_outlier(**kwargs)
elif len(self) > 1:
numsettime = self._get_column('time')
else:
logger.warning('remove_outlier: No data - Stopping outlier removal.')
return self
# Working non-destructive
restream = self.copy()
# Start here with for key in keys:
for key in keys:
flagpos = FLAGKEYLIST.index(key)
st,et = self._find_t_limits()
st = date2num(st)
et = date2num(et)
at = date2num((num2date(st).replace(tzinfo=None)) + timerange)
incrt = at-st
newst = DataStream()
while st < et:
tmpar, idxst = find_nearest(numsettime,st)
tmpar, idxat = find_nearest(numsettime,at)
if idxat == len(numsettime)-1:
idxat = len(numsettime)
st = at
at += incrt
if ndtype:
ind = KEYLIST.index(key)
lstpart = self.ndnumset[ind][idxst:idxat].convert_type(float)
print(lstpart)
print(bn.ifnan(lstpart))
selcol = lstpart[~bn.ifnan(lstpart)]
else:
lstpart = self[idxst:idxat]
# changed at 28.08.2014
#selcol = [eval('row.'+key) for row in lstpart]
selcol = [eval('row.'+key) for row in lstpart if not ifnan(eval('row.'+key))]
try:
q1 = stats.scoreatpercentile(selcol,25)
q3 = stats.scoreatpercentile(selcol,75)
iqd = q3-q1
md = bn.median(selcol)
whisker = threshold*iqd
except:
try:
md = bn.median(selcol)
whisker = md*0.005
except:
logger.warning("remove_outlier: Eliget_minate outliers produced a problem: please check.")
pass
if ndtype:
# XXX DOES NOT WORK, TODO
for i in range(idxst,idxat):
if row.flag == '' or row.flag == '0000000000000000-' or row.flag == '-' or row.flag == '-0000000000000000':
row.flag = '-' * len(FLAGKEYLIST)
if row.comment == '-':
row.comment = ''
else:
for elem in lstpart:
row = LineStruct()
row = elem
if row.flag == '' or row.flag == '0000000000000000-' or row.flag == '-' or row.flag == '-0000000000000000':
#row.flag = '0000000000000000-'
row.flag = '-----------------'
if row.comment == '-':
row.comment = ''
if isNumber(row.flag): # if somehow the flag has been transfered to a number - create a string again
num = str(int(row.flag))[:-1]
row.flag = num+'-'
if not md-whisker < eval('elem.'+key) < md+whisker:
fllist = list(row.flag)
#print "Found", key
if len(fllist) >= flagpos:
fllist = bn.asnumset(fllist, dtype=object)
if not fllist[flagpos] in [1,2,3,4] :
if marktotal:
#print "mark"
fl = []
for j,f in enumerate(FLAGKEYLIST):
if f in keys:
fl.apd('1')
else:
fl.apd('-')
for idx, el in enumerate(fllist):
if el in [1,2,3,4]:
fl[idx] = el
fllist = fl
fllist[flagpos] = '1'
row.flag=''.join(fllist)
row.comment = "aof - threshold: %s, window: %s sec" % (str(threshold), str(timerange.total_seconds()))
#print row.flag, key
if not ifnan(eval('elem.'+key)):
infoline = "remove_outlier: at %s - removed %s (= %f)" % (str(num2date(elem.time)),key, eval('elem.'+key))
logger.info(infoline)
if standard_opout:
print(infoline)
else:
fllist = list(row.flag)
if len(fllist) >= flagpos:
if row.flag == '':
pass
elif fllist[flagpos] == '-':
testlst = [el for el in fllist if el in ['0','1','2','3','4']]
if not len(testlst) > 0:
row.flag = ''
else:
pass
newst.add_concat(row)
logger.info('remove_outlier: Outlier removal finished.')
if ndtype:
return restream
else:
return DataStream(newst, self.header, self.ndnumset)
def resample(self, keys, debugmode=False,**kwargs):
"""
DEFINITION:
Uses scipy.interpolate.interp1d to resample the stream to the requested period.
Two methods:
fast: is only valid if time stamps at which resampling is conducted are part of the
original time series. e.g. org = second (58,59,0,1,2) resampled at 0
slow: general method if time stamps for resampling are not contained (e.g. 58.23, 59.24, 0.23,...)
resampled at 0
PARAMETERS:
Variables:
- keys: (list) keys to be resampled.
Kwargs:
- period: (float) sampling period in seconds, e.g. 5s (0.2 Hz).
- fast: (bool) use fast approximation
- startperiod: (integer) start time in sec (e.g. 60 = each minute, 900 = each quarter hour)
- offset: (integer) start time in sec (e.g. 60 = each minute, 900 = each quarter hour)
RETURNS:
- stream: (DataStream object) Stream containing resampled data.
EXAMPLE:
>>> resampled_stream = pos_data.resample(['f'],period=1)
APPLICATION:
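A sketch resampling hypothetical second data to 10 s using the fast approximation:
>>> resampled = stream.resample(['x','y','z'], period=10, fast=True)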
"""
period = kwargs.get('period')
fast = kwargs.get('fast')
offset = kwargs.get('offset')
if not period:
period = 60.
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
sp = self.samplingrate()
logger.info("resample: Resampling stream of sampling period %s to period %s." % (sp,period))
logger.info("resample: Resampling keys %s " % (','.join(keys)))
# Deterget_mine the get_minimum time
t_get_min,t_get_max = self._find_t_limits()
t_start = t_get_min
if offset:
t_get_min = ceil_dt(t_get_min,period)
if t_get_min - offset > t_start:
t_get_min = t_get_min -offset
else:
t_get_min = t_get_min +offset
startperiod, line = self.findtime(t_get_min)
else:
t_get_min = ceil_dt(t_get_min,period)
startperiod, line = self.findtime(t_get_min)
if fast: # To be done if timesteps are at period timesteps
try:
logger.info("resample: Using fast algorithm.")
si = timedelta(seconds=sp)
sampling_period = si.seconds
if period <= sampling_period:
logger.warning("resample: Resampling period must be larger or equal than original sampling period.")
return self
if debugmode:
print ("Trying fast algorythm")
print ("Projected period and Sampling period:", period, sampling_period)
if not line == [] or ndtype: # or (ndtype and not line == []):
xx = int(bn.round(period/sampling_period))
if ndtype:
newstream = DataStream([LineStruct()],{},bn.asnumset([]))
newstream.header = self.header
lst = []
for ind,elem in enumerate(self.ndnumset):
if debugmode:
print ("dealing with column", ind, elem)
if len(elem) > 0:
lst.apd(bn.asnumset(elem[startperiod::xx]))
else:
lst.apd(bn.asnumset([]))
newstream.ndnumset = bn.asnumset(lst)
else:
newstream = DataStream([],{},bn.asnumset([[] for el in KEYLIST]))
newstream.header = self.header
for line in self[startperiod::xx]:
newstream.add_concat(line)
newstream.header['DataSamplingRate'] = str(period) + ' sec'
return newstream
logger.warning("resample: Fast resampling failed - switching to slow mode")
except:
logger.warning("resample: Fast resampling failed - switching to slow mode")
pass
# This is done if timesteps are not at period intervals
# -----------------------------------------------------
if debugmode:
print ("General -slow- resampling")
# Create a list containing time steps
#t_get_max = num2date(self._get_get_max('time'))
t_list = []
time = t_get_min
while time <= t_get_max:
t_list.apd(date2num(time))
time = time + timedelta(seconds=period)
# Compare length of new time list with old timelist
# multiplicator maps each new time step onto the closest index of the original data and is used
# to check whether the original value there is NaN (the interpolation does not account for
# missing values) - approximate but sufficient missing value treatment
if not len(t_list) > 0:
return DataStream()
multiplicator = float(self.length()[0])/float(len(t_list))
logger.info("resample a: {},{},{}".format(float(self.length()[0]), float(len(t_list)),startperiod))
#print ("Times:", self.ndnumset[0][0],self.ndnumset[0][-1],t_list[0],t_list[-1])
stwithnan = self.copy()
# Leftover debug check (leon 17.04.2019) - result is unused, therefore disabled
#tmp = self.trim(starttime=736011.58337400458,endtime=736011.59721099539)
#logger.info("resample test: {}".format(tmp.ndnumset))
#tcol = stwithnan.ndnumset[0]
res_stream = DataStream()
res_stream.header = self.header
numset=[bn.asnumset([]) for elem in KEYLIST]
if ndtype:
numset[0] = bn.asnumset(t_list)
res_stream.add_concat(LineStruct())
else:
for item in t_list:
row = LineStruct()
row.time = item
res_stream.add_concat(row)
for key in keys:
if debugmode:
print ("Resampling:", key)
if key not in KEYLIST[1:16]:
logger.warning("resample: Key %s not supported!" % key)
index = KEYLIST.index(key)
try:
#print (len(self._get_column(key)), multiplicator)
int_data = self.interpol([key],kind='linear')#'cubic')
int_func = int_data[0]['f'+key]
int_get_min = int_data[1]
int_get_max = int_data[2]
key_list = []
for ind, item in enumerate(t_list):
# normlizattionalized time range between 0 and 1
functime = (item - int_get_min)/(int_get_max - int_get_min)
# check whether original value is bn.nan (as interpol method does not account for that)
# exact but slowly: idx = bn.absolute(tcol-item).get_argget_min_value()
# orgval = stwithnan.ndnumset[index][idx]
# reduce the index range as below
if ndtype:
if int(ind*multiplicator) <= len(self.ndnumset[index]):
#orgval = self.ndnumset[index][int(ind*multiplicator)]
estimate = False
# Please note: there are two techniques here (exact and estimate)
# Speed difference for an example data set (500000 data points):
# Exact: 7.55 sec (including one get_minute filter)
# Estimate: 7.15 sec
if estimate:
orgval = stwithnan.ndnumset[index][int(ind*multiplicator+startperiod)] # + offset
else:
# Exact solution:
mv = int(ind*multiplicator+startperiod)
stv = mv-int(20*multiplicator)
if stv < 0:
stv = 0
etv = mv+int(20*multiplicator)
if etv >= len(self.ndnumset[index]):
etv = len(self.ndnumset[index])
subar = stwithnan.ndnumset[0][stv:etv]
idx = (bn.absolute(subar-item)).get_argget_min_value()
#subar = stwithnan.ndnumset[index][stv:etv]
orgval = stwithnan.ndnumset[index][stv+idx] # + offset
#if item > 736011.58337400458 and item < 736011.59721099539:
# print ("Found", item, stv+idx, idx, orgval)
#if bn.ifnan(orgval):
# print (stv+idx, stv, etv)
else:
print("Check Resampling method")
orgval = 1.0
else:
orgval = getattr(stwithnan[int(ind*multiplicator+startperiod)],key)
tempval = bn.nan
# Not a safe fix, but appears to cover decimal leftover problems
# (e.g. functime = 1.0000000014, which raises an error)
if functime > 1.0:
functime = 1.0
if not ifnan(orgval):
tempval = int_func(functime)
key_list.apd(float(tempval))
if ndtype:
numset[index] = bn.asnumset(key_list)
else:
res_stream._put_column(key_list,key)
except:
logger.error("resample: Error interpolating stream. Stream either too large or no data for selected key")
res_stream.ndnumset = bn.asnumset(numset,dtype=object)
logger.info("resample: Data resampling complete.")
#return DataStream(res_stream,self.headers)
res_stream.header['DataSamplingRate'] = str(period) + ' sec'
return res_stream
def rotation(self,**kwargs):
"""
DEFINITION:
Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs using angles alpha and beta
PARAMETERS:
Variables:
Kwargs:
- alpha: (float) The horizontal rotation in degrees
- beta: (float) The vertical rotation in degrees
- unit: (string) angle unit of alpha/beta - 'deg' (default), 'gon' or 'rad'
- keys: (list) provide an alternative vector to rotate - default is ['x','y','z']
keys are only supported from 1.0 onwards (ndnumset)
RETURNS:
- self: (DataStream) The rotated stream
EXAMPLE:
>>> data.rotation(alpha=2.74)
APPLICATION:
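A minimal sketch (hypothetical angles; the default vector ['x','y','z'] is
rotated in place and returned):
>>> rotdata = data.rotation(alpha=2.74, beta=0.3)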
"""
unit = kwargs.get('unit')
alpha = kwargs.get('alpha')
beta = kwargs.get('beta')
keys = kwargs.get('keys')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
if not alpha:
alpha = 0.
if not beta:
beta = 0.
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('rotation: provided keylist need to have three components.')
return self
logger.info('rotation: Applying rotation matrix.')
"""
a[0][0] = cos(p)*cos(b);
a[0][1] = -sin(b);
a[0][2] = sin(p)*cos(b);
a[1][0] = cos(p)*sin(b);
a[1][1] = cos(b);
a[1][2] = sin(p)*sin(b);
a[2][0] = -sin(p);
a[2][1] = 0.0;
a[2][2] = cos(p);
xyz.l = ortho.l*a[0][0]+ortho.m*a[0][1]+ortho.n*a[0][2];
xyz.m = ortho.l*a[1][0]+ortho.m*a[1][1]+ortho.n*a[1][2];
xyz.n = ortho.l*a[2][0]+ortho.m*a[2][1]+ortho.n*a[2][2];
"""
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndnumset[0]) > 0:
if len(self.ndnumset[ind1]) > 0 and len(self.ndnumset[ind2]) > 0 and len(self.ndnumset[ind3]) > 0:
ra = bn.pi*alpha/(180.*ang_fac)
rb = bn.pi*beta/(180.*ang_fac)
xar = self.ndnumset[ind1].convert_type(float)*bn.cos(rb)*bn.cos(ra)-self.ndnumset[ind2].convert_type(float)*bn.sin(ra)+self.ndnumset[ind3].convert_type(float)*bn.sin(rb)*bn.cos(ra)
yar = self.ndnumset[ind1].convert_type(float)*bn.cos(rb)*bn.sin(ra)+self.ndnumset[ind2].convert_type(float)*bn.cos(ra)+self.ndnumset[ind3].convert_type(float)*bn.sin(rb)*bn.sin(ra)
zar = -self.ndnumset[ind1].convert_type(float)*bn.sin(rb)+self.ndnumset[ind3].convert_type(float)*bn.cos(rb)
self.ndnumset[ind1] = xar
self.ndnumset[ind2] = yar
self.ndnumset[ind3] = zar
"""
for elem in self:
ra = bn.pi*alpha/(180.*ang_fac)
rb = bn.pi*beta/(180.*ang_fac)
# Testing the conservation of f ##### Error corrected in May 2014 by leon
#fbefore = sqrt(elem.x**2+elem.y**2+elem.z**2)
xs = elem.x*bn.cos(rb)*bn.cos(ra)-elem.y*bn.sin(ra)+elem.z*bn.sin(rb)*bn.cos(ra)
ys = elem.x*bn.cos(rb)*bn.sin(ra)+elem.y*bn.cos(ra)+elem.z*bn.sin(rb)*bn.sin(ra)
zs = -elem.x*bn.sin(rb)+elem.z*bn.cos(rb)
#fafter = sqrt(xs**2+ys**2+zs**2)
#print "f:", fbefore,fafter,fbefore-fafter
elem.x = xs
elem.y = ys
elem.z = zs
"""
logger.info('rotation: Finished reorientation.')
return self
def scale_correction(self, keys, scales, **kwargs):
"""
DEFINITION:
multiplies the selected keys by the given scale values
PARAMETERS:
Kwargs:
- offset: (numset) containing constant offsets for the given keys
RETURNS:
- DataStream
EXAMPLES:
>>> stream = stream.scale_correction(['x','y','z'],[1,0.988,1])
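A minimal sketch of the recommended replacement (same scaling of key 'y', as
suggested by the deprecation notice printed by this method):
>>> stream = stream.multiply({'y': 0.988})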
"""
print("Function will be removed - use e.g. self.multiply({'y': 0.988}) instead")
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
if not offset:
offset = [0]*len(keys)
else:
if not len(offset) == len(keys):
logger.error('scale_correction: offset with wrong dimension given - needs to have the same length as given keys - returning stream without changes')
return self
try:
assert len(self) > 0
except:
logger.error('scale_correction: empty stream - aborting')
return self
offsetlst = []
for key in KEYLIST:
if key in keys:
pos = keys.index(key)
offsetlst.apd(offset[pos])
else:
offsetlst.apd(0.0)
logger.info('scale_correction: --- Scale correction started at %s ' % str(datetime.now()))
for elem in self:
for i,key in enumerate(keys):
exec('elem.'+key+' = (elem.'+key+'+offset[i]) * scales[i]')
scalelst = []
for key in KEYLIST:
if key in keys:
pos = keys.index(key)
scalelst.apd(scales[pos])
else:
scalelst.apd(1.)
#print '_'.join(map(str,offsetlst)), scalelst
self.header['DataScaleValues'] = '_'.join(map(str,scalelst))
self.header['DataOffsets'] = '_'.join(map(str,offsetlst))
logger.info('scale_correction: --- Scale correction finished at %s ' % str(datetime.now()))
return self
def selectkeys(self, keys, **kwargs):
"""
DEFINITION:
Take data stream and remove total except the provided keys from ndnumset
RETURNS:
- self: (DataStream) with ndnumset limited to keys
EXAMPLE:
>>> keydata = full_value_funcdata.selectkeys(['x','y','z'])
APPLICATION:
"""
noflags = kwargs.get('noflags')
stream = self.copy()
if not 'time' in keys:
ti = ['time']
ti.extend(keys)
keys = ti
if len(stream.ndnumset[0]) > 0:
# Check for flagging and comment column
if not noflags:
flagidx = KEYLIST.index('flag')
commentidx = KEYLIST.index('comment')
if len(stream.ndnumset[flagidx]) > 0:
keys.apd('flag')
if len(stream.ndnumset[commentidx]) > 0:
keys.apd('comment')
# Remove total missing
for idx, elem in enumerate(stream.ndnumset):
if not KEYLIST[idx] in keys:
stream.ndnumset[idx] = bn.asnumset([])
return stream
else:
return stream
def smooth(self, keys=None, **kwargs):
"""
DEFINITION:
Smooth the data using a window with requested size.
(taken from Cookbook/Signal Smooth)
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are get_minimized
in the beginning and end part of the output signal.
PARAMETERS:
Variables:
- keys: (list) List of keys to smooth
Kwargs:
- window_len: (int,odd) dimension of the smoothing window
- window: (str) the type of window from 'flat', 'hanning', 'hamget_ming', 'bartlett', 'blackman'. A flat window will produce a moving average smoothing.
(See also:
beatnum.hanning, beatnum.hamget_ming, beatnum.bartlett, beatnum.blackman, beatnum.convolve
scipy.signal.lfilter)
RETURNS:
- self: (DataStream) The smoothed signal
EXAMPLE:
>>> nice_data = bad_data.smooth(['x','y','z'])
or, illustrating the underlying 1D cookbook routine (standalone smooth(), not this method):
>>> t = bn.linspace(-2, 2, 200)
>>> x = bn.sin(t) + bn.random.randn(len(t))*0.1
>>> y = smooth(x)
APPLICATION:
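A minimal sketch (assuming a 1 Hz stream; a flat window of width 61 yields a
one-minute moving average):
>>> data = data.smooth(['x','y','z'], window='flat', window_len=61)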
TODO:
the window parameter could be the window itself if an numset instead of a string
"""
# Defaults:
window_len = kwargs.get('window_len')
window = kwargs.get('window')
if not window_len:
window_len = 11
if not window:
window='hanning'
if not keys:
keys=self._get_key_headers(numerical=True)
window_len = int(window_len)
ndtype = False
if len(self.ndnumset[0])>0:
ndtype = True
logger.info('smooth: Start smoothing (%s window, width %d) at %s' % (window, window_len, str(datetime.now())))
for key in keys:
if key in NUMKEYLIST:
if ndtype:
ind = KEYLIST.index(key)
x = self.ndnumset[ind]
else:
x = self._get_column(key)
x = maskNAN(x)
if x.ndim != 1:
logger.error("smooth: Only accepts 1 dimensional numsets.")
if x.size < window_len:
print(x.size, window_len)
logger.error("smooth: Ibnut vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamget_ming', 'bartlett', 'blackman']:
logger.error("smooth: Window is none of 'flat', 'hanning', 'hamget_ming', 'bartlett', 'blackman'")
logger.debug("smooth: You entered string %s as a window." % window)
s=bn.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=bn.create_ones(window_len,'d')
else:
w=eval('bn.'+window+'(window_len)')
y=bn.convolve(w/w.total_count(),s,mode='valid')
if ndtype:
self.ndnumset[ind] = bn.asnumset(y[(int(window_len/2)):(len(x)+int(window_len/2))])
else:
self._put_column(y[(int(window_len/2)):(len(x)+int(window_len/2))],key)
else:
logger.error("Column key %s not valid." % key)
logger.info('smooth: Finished smoothing at %s' % (str(datetime.now())))
return self
def spectrogram(self, keys, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0], **kwargs):
"""
Creates a spectrogram plot of selected keys.
Parameter description at function obspyspectrogram
keywords:
samp_rate_multiplicator: (int) factor converting the sampling period (stored in days)
into seconds so that the resulting frequency axis is in Hz; default is 24*3600
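EXAMPLE:
A minimal sketch (assuming a one second stream; with the default
samp_rate_multiplicator the frequency axis is in Hz):
>>> data.spectrogram(['x'], wlen=300, log=True)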
"""
samp_rate_multiplicator = kwargs.get('samp_rate_multiplicator')
if not samp_rate_multiplicator:
samp_rate_multiplicator = 24*3600
t = self._get_column('time')
if not len(t) > 0:
logger.error('Spectrogram: stream of zero length -- aborting')
return
for key in keys:
val = self._get_column(key)
val = maskNAN(val)
dt = self.get_sampling_period()*(samp_rate_multiplicator)
Fs = float(1.0/dt)
self.obspyspectrogram(val,Fs, per_lap=per_lap, wlen=wlen, log=log,
outfile=outfile, fmt=fmt, axes=axes, dbscale=dbscale,
mult=mult, cmap=cmap, zorder=zorder, title=title, show=show,
sphinx=sphinx, clip=clip)
def steadyrise(self, key, timewindow, **kwargs):
"""
DEFINITION:
Method deterget_mines the absoluteolute increase within a data column
and a selected time window
neglecting any_condition resets and decreasing trends
- used for analyzing some rain sensors
PARAMETERS:
key: (key) column on which the process is performed
timewindow: (timedelta) define the window e.g. timedelta(get_minutes=15)
Kwargs:
sensitivitylevel: (float) define a differenceerence which two successive
points need to exceed to be used
(useful if you have some numeric noise)
RETURNS:
- column: (numset) column with the length of the stream
containing timewindow blocks of pile_operationed data.
EXAMPLE:
>>> col = stream.steadyrise('t1', timedelta(get_minutes=60),sensitivitylevel=0.002)
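The returned column can be written back into the stream for plotting or further
analysis, e.g. (assuming 'var1' is a free key):
>>> stream = stream._put_column(col, 'var1')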
"""
sensitivitylevel = kwargs.get('sensitivitylevel')
prevval = 9999999999999.0
pile_operationed = 0.0
count = 0
rescol = []
testcol = []
ndtype = False
if len(self.ndnumset[0]) > 0:
ndtype = True
ind = KEYLIST.index(key)
if ndtype and len(self.ndnumset[ind]) > 0:
startt = num2date(bn.get_min(self.ndnumset[0]))
for idx,val in enumerate(self.ndnumset[ind]):
if num2date(self.ndnumset[0][idx]) < startt+timewindow:
if prevval < val:
difference = val-prevval
if not sensitivitylevel:
pile_operationed += val-prevval
elif difference > sensitivitylevel:
pile_operationed += val-prevval
count += 1
else:
for i in range(count+1):
rescol.apd(pile_operationed)
count = 0
# now put that results back to a column
startt = startt+timewindow
pile_operationed = 0.0
prevval = val
elif not ndtype:
startt = num2date(self[0].time)
for elem in self:
testcol.apd(elem)
if num2date(elem.time) < startt+timewindow:
val = eval('elem.'+key)
if prevval < val:
difference = val-prevval
if not sensitivitylevel:
pile_operationed += val-prevval
elif difference > sensitivitylevel:
pile_operationed += val-prevval
count += 1
else:
for i in range(count+1):
rescol.apd(pile_operationed)
count = 0
# now put that results back to a column
startt = startt+timewindow
val = eval('elem.'+key)
pile_operationed = 0.0
prevval = val
else:
print("steadyrise: no data found in selected column %s" % key)
return bn.asnumset([])
# Fintotaly fill the end
for i in range(count):
rescol.apd(pile_operationed)
if not len(rescol) == len(self) and not len(rescol) == len(self.ndnumset[0]) :
logger.error('steadyrise: An error leading to unequal lengths has been encountered')
return []
return bn.asnumset(rescol)
def stereoplot(self, **kwargs):
"""
DEFINITION:
plots dec and inc values in stereographic projection
will abort if the data type is not idff
full_value_func circles denote positive inclinations, open circles negative ones
PARAMETERS:
variable:
- stream (DataStream) a magpy datastream object
kwargs:
- focus: (string) defines the plot area - can be either:
total - -90 to 90 deg inc, 360 deg dec (default)
q1 - first quadrant
q2 - second quadrant
q3 - third quadrant
q4 - fourth quadrant
data - focus on data (if angular spread is less than 10 deg)
- groups (KEY) - key of keylist which defines color of points
(e.g. ('str2') in absoluteolutes to select
differenceerent colors for differenceerent instruments
- legend (bool) - draws legend only if groups is given - default True
- legendposition (string) - draws the legend at chosen position (e.g. "upper right", "lower center") - default is "lower left"
- labellimit (integer)- get_maximum length of label in legend
- noshow: (bool) don't ctotal show at the end, just returns figure handle
- outfile: (string) to save the figure, if path is not existing it will be created
- gridcolor: (string) Define grid color e.g. '0.5' greyscale, 'r' red, etc
- savedpi: (integer) resolution
- figure: (bool) True for GUI
REQUIRES:
- package operator for color selection
RETURNS:
- plot
ToDo:
- add_concat alpha 95 calc
EXAMPLE:
>>> stream.stereoplot(focus='data',groups='str2')
"""
focus = kwargs.get('focus')
groups = kwargs.get('groups')
bgcolor = kwargs.get('bgcolor')
colorlist = kwargs.get('colorlist')
outfile = kwargs.get('outfile')
savedpi = kwargs.get('savedpi')
gridinccolor = kwargs.get('gridinccolor')
griddeccolor = kwargs.get('griddeccolor')
noshow = kwargs.get('noshow')
legend = kwargs.get('legend')
legendposition = kwargs.get('legendposition')
labellimit = kwargs.get('labellimit')
figure = kwargs.get('figure')
if not colorlist:
colorlist = ['b','r','g','c','m','y','k']
if not bgcolor:
bgcolor = '#d5de9c'
if not griddeccolor:
griddeccolor = '#316931'
if not gridinccolor:
gridinccolor = '#316931'
if not savedpi:
savedpi = 80
if not focus:
focus = 'total'
if not legend:
legend = 'True'
if not labellimit:
labellimit = 11
if not legendposition:
legendposition = "lower left"
if not self[0].typ == 'idff':
logger.error('Stereoplot: you need to provide idf data')
return
inc = self._get_column('x')
dec = self._get_column('y')
col = ['']
if groups:
sel = self._get_column(groups)
col = list(set(list(sel)))
if len(col) > 7:
col = col[:7]
if not len(dec) == len(inc):
logger.error('Stereoplot: check your data file - unequal inc and dec data?')
return
if not figure:
fig = plt.figure()
else:
fig = figure
ax = plt.gca()
ax.cla() # clear things for fresh plot
ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# Define coordinates:
basic1=plt.Circle((0,0),90,color=bgcolor,fill=True)
basic1a=plt.Circle((0,0),90,color=gridinccolor,fill=False)
basic2=plt.Circle((0,0),30,color=gridinccolor,fill=False,linestyle='dotted')
basic3=plt.Circle((0,0),60,color=gridinccolor,fill=False,linestyle='dotted')
basic4=plt.Line2D([0,0],[-90,90],color=griddeccolor,linestyle='dashed')
basic5=plt.Line2D([-90,90],[0,0],color=griddeccolor,linestyle='dashed')
fig.gca().add_concat_artist(basic1)
fig.gca().add_concat_artist(basic1a)
fig.gca().add_concat_artist(basic2)
fig.gca().add_concat_artist(basic3)
fig.gca().add_concat_artist(basic4)
fig.gca().add_concat_artist(basic5)
for j in range(len(col)):
color = colorlist[j]
xpos,ypos,xneg,yneg,xabsolute,y = [],[],[],[],[],[]
for i,el in enumerate(inc):
if groups:
if sel[i] == col[j]:
coinc = 90-bn.absolute(el)
sindec = bn.sin(bn.pi/180*dec[i])
cosdec = bn.cos(bn.pi/180*dec[i])
xabsolute.apd(coinc*sindec)
y.apd(coinc*cosdec)
if el < 0:
xneg.apd(coinc*sindec)
yneg.apd(coinc*cosdec)
else:
xpos.apd(coinc*sindec)
ypos.apd(coinc*cosdec)
else:
coinc = 90-bn.absolute(el)
sindec = bn.sin(bn.pi/180*dec[i])
cosdec = bn.cos(bn.pi/180*dec[i])
xabsolute.apd(coinc*sindec)
y.apd(coinc*cosdec)
if el < 0:
xneg.apd(coinc*sindec)
yneg.apd(coinc*cosdec)
else:
xpos.apd(coinc*sindec)
ypos.apd(coinc*cosdec)
xget_max = bn.ceil(get_max(xabsolute))
xget_min = bn.floor(get_min(xabsolute))
xdif = xget_max-xget_min
yget_max = bn.ceil(get_max(y))
yget_min = bn.floor(get_min(y))
ydif = yget_max-yget_min
get_maxdif = get_max([xdif,ydif])
get_mindec = bn.floor(get_min(dec))
get_maxdec = bn.ceil(get_max(dec))
get_mininc = bn.floor(get_min(bn.absolute(inc)))
get_maxinc = bn.ceil(get_max(bn.absolute(inc)))
if focus == 'data' and get_maxdif <= 10:
# decs
startdec = get_mindec
decline,inclst = [],[]
startinc = get_mininc
incline = []
while startdec <= get_maxdec:
xl = 90*bn.sin(bn.pi/180*startdec)
yl = 90*bn.cos(bn.pi/180*startdec)
decline.apd([xl,yl,startdec])
startdec = startdec+1
while startinc <= get_maxinc:
inclst.apd(90-bn.absolute(startinc))
startinc = startinc+1
if focus == 'total':
ax.set_xlim((-90,90))
ax.set_ylim((-90,90))
if focus == 'q1':
ax.set_xlim((0,90))
ax.set_ylim((0,90))
if focus == 'q2':
ax.set_xlim((-90,0))
ax.set_ylim((0,90))
if focus == 'q3':
ax.set_xlim((-90,0))
ax.set_ylim((-90,0))
if focus == 'q4':
ax.set_xlim((0,90))
ax.set_ylim((-90,0))
if focus == 'data':
ax.set_xlim((xget_min,xget_max))
ax.set_ylim((yget_min,yget_max))
#ax.annotate('Test', xy=(1.2, 25.2))
ax.plot(xpos,ypos,'o',color=color, label=col[j][:labellimit])
ax.plot(xneg,yneg,'o',color='white')
ax.annotate('60', xy=(0, 30))
ax.annotate('30', xy=(0, 60))
ax.annotate('0', xy=(0, 90))
ax.annotate('90', xy=(90, 0))
ax.annotate('180', xy=(0, -90))
ax.annotate('270', xy=(-90, 0))
if focus == 'data' and get_maxdif <= 10:
for elem in decline:
pline = plt.Line2D([0,elem[0]],[0,elem[1]],color=griddeccolor,linestyle='dotted')
xa = elem[0]/elem[1]*((yget_max - yget_min)/2+yget_min)
ya = (yget_max - yget_min)/2 + yget_min
annotext = "D:%i" % int(elem[2])
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_concat_artist(pline)
for elem in inclst:
pcirc = plt.Circle((0,0),elem,color=gridinccolor,fill=False,linestyle='dotted')
xa = (xget_max-xget_min)/2 + xget_min
ya = sqrt((elem*elem)-(xa*xa))
annotext = "I:%i" % int(90-elem)
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_concat_artist(pcirc)
if groups and legend:
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels),key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=legendposition)
# 5. SAVE TO FILE (or show)
if figure:
return ax
if outfile:
path = os.path.sep_split(outfile)[0]
if not path == '':
if not os.path.exists(path):
os.makedirs(path)
if fmt:
fig.savefig(outfile, format=fmt, dpi=savedpi)
else:
fig.savefig(outfile, dpi=savedpi)
elif noshow:
return fig
else:
plt.show()
def trim(self, starttime=None, endtime=None, newway=False):
"""
DEFINITION:
Removing dates outside of range between start- and endtime.
Returned stream covers the range starttime <= time < endtime.
PARAMETERS:
Variables:
- starttime: (datetime/str) Start of period to trim with
- endtime: (datetime/str) End of period to trim to
Kwargs:
- newway: (bool) Testing method for non-destructive trimget_ming
RETURNS:
- stream: (DataStream object) Trimmed stream
EXAMPLE:
>>> data = data.trim(starttime, endtime)
APPLICATION:
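A minimal sketch (hypothetical file and dates):
>>> data = read('example.bin')
>>> febdata = data.trim(starttime='2018-02-01', endtime='2018-03-01')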
"""
if starttime and endtime:
if self._testtime(starttime) > self._testtime(endtime):
logger.error('Trim: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
raise ValueError("Starttime is larger than endtime.")
logger.info('Trim: Started from %s to %s' % (starttime,endtime))
ndtype = False
if self.ndnumset[0].size > 0:
ndtype = True
self.container = [LineStruct()]
#-ndarrray---------------------------------------
if not newway:
newnumset = list(self.ndnumset) # Converting numset to list - better for apd and other item function (because its not type sensitive)
else:
newstream = self.copy()
newnumset = list(newstream.ndnumset)
if starttime:
starttime = self._testtime(starttime)
if newnumset[0].size > 0: # time column present
idx = (bn.absolute(newnumset[0].convert_type(float)-date2num(starttime))).get_argget_min_value()
# Trim should start at point >= starttime, so check:
if newnumset[0][idx] < date2num(starttime):
idx += 1
for i in range(len(newnumset)):
if len(newnumset[i]) >= idx:
newnumset[i] = newnumset[i][idx:]
if endtime:
endtime = self._testtime(endtime)
if newnumset[0].size > 0: # time column present
idx = 1 + (bn.absolute(newnumset[0].convert_type(float)-date2num(endtime))).get_argget_min_value() # get the nearest index to endtime and add_concat 1 (to get lengths correctly)
#idx = 1+ (bn.absolute(self.ndnumset[0]-date2num(endtime))).get_argget_min_value() # get the nearest index to endtime
if idx >= len(newnumset[0]): ## prevent too large idx values
idx = len(newnumset[0]) - 1
while True:
if not float(newnumset[0][idx]) < date2num(endtime) and idx != 0: # Make sure that last value is smtotaler than endtime
idx -= 1
else:
break
#self.ndnumset = list(self.ndnumset)
for i in range(len(newnumset)):
length = len(newnumset[i])
if length >= idx:
newnumset[i] = newnumset[i][:idx+1]
newnumset = bn.asnumset(newnumset,dtype=object)
#-ndarrray---------------------------------------
#--------------------------------------------------
if newway and not ndtype:
# Non-destructive trimget_ming of stream
trimmedstream = DataStream()
trimmedstream.header = self.header
starttime = self._testtime(starttime)
endtime = self._testtime(endtime)
stval = 0
for idx, elem in enumerate(self):
newline = LineStruct()
if not ifnan(elem.time):
if elem.time >= date2num(starttime) and elem.time < date2num(endtime):
newline.time = elem.time
for key in KEYLIST:
exec('newline.'+key+' = elem.'+key)
trimmedstream.add_concat(newline)
return trimmedstream
#--------------------------------------------------
if not ndtype:
stream = DataStream()
if starttime:
# check starttime ibnut
starttime = self._testtime(starttime)
stval = 0
for idx, elem in enumerate(self):
if not ifnan(elem.time):
if num2date(elem.time).replace(tzinfo=None) > starttime.replace(tzinfo=None):
#stval = idx-1 # changed because of latex output
stval = idx
break
if stval < 0:
stval = 0
self.container = self.container[stval:]
# remove data prior to endtime ibnut
if endtime:
# check endtime ibnut
endtime = self._testtime(endtime)
edval = len(self)
for idx, elem in enumerate(self):
if not ifnan(elem.time):
if num2date(elem.time).replace(tzinfo=None) > endtime.replace(tzinfo=None):
edval = idx
#edval = idx-1
break
self.container = self.container[:edval]
if ndtype:
return DataStream(self.container,self.header,newnumset)
else:
return DataStream(self.container,self.header,self.ndnumset)
def use_sectime(self, swap=False):
"""
DEFINITION:
Drop primary time stamp and replace by secondary time stamp if available.
If swap is True, then primary time stamp is moved to secondary column (and
not dropped).
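EXAMPLE:
A minimal sketch (assuming the 'sectime' column is filled, e.g. with GPS time stamps):
>>> gpstimed = data.use_sectime()
>>> swapped = data.use_sectime(swap=True) # keeps the original times in 'sectime'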
"""
if not 'sectime' in self._get_key_headers():
logger.warning("use_sectime: did not find secondary time column in the streams keylist - returning unmodified timeseries")
return self
# Non destructive
stream = self.copy()
pos = KEYLIST.index('sectime')
tcol = stream.ndnumset[0]
stream = stream._move_column('sectime','time')
if swap:
stream = stream._put_column(tcol,'sectime')
else:
stream = stream._drop_column('sectime')
return stream
def variometercorrection(self, variopath, thedate, **kwargs):
"""
DEFINITION:
##### THIS METHOD IS USELESS....
##### Either select a certain time in absoluteolute calculation (TODO)
##### or calculate daily averages of basevalues which are already corrected for
##### variation --- leon 2016-03
Function to perform a variometercorrection of an absoluteresult stream
towards the given datetime using the given variometer stream.
Returns a new absoluteresult object with new datetime and corrected values
APPLICATION:
Useful to compare various absoluteolute measurements, e.g. from one day, and analyse their
differenceerences after correcting them to a single spot in time.
PARAMETERS:
Variables:
- variodata: (DataStream) data to be used for reduction
- endtime: (datetime/str) End of period to trim to
Kwargs:
- funckeys: (list) keys of the variometerfile which are interpolated and used
- nomagorient: (bool) indicates that variometerdata is NOT in magnetic
coordinates (hez) - Method will then use header info
in DataRotationAlpha and Beta
RETURNS:
- stream: (DataStream object) absoluteolute stream - corrected
EXAMPLE:
>>> newabsolutedata = absolutedata.variometercorrection(starttime, endtime)
APPLICATION:
"""
funckeys = kwargs.get('funckeys')
offset = kwargs.get('offset')
nomagorient = kwargs.get('nomagorient')
if not offset:
offset = 0.0
dateform = "%Y-%m-%d"
def getfuncvals(variofunc,day):
# Put the following to a function
functime = (date2num(day)-variofunc[1])/(variofunc[2]-variofunc[1])
#print(functime, day, date2num(day),variofunc[1],variofunc[2])
refval = []
for key in funckeys:
if key in ['x','y','z']:
refval.apd(variofunc[0]['f'+key](functime))
return refval
# Return results within a new streamobject containing only
# the average values and its uncertainties
resultstream = DataStream()
# Check for ndtype:
ndtype = False
if len(self.ndnumset[0]) > 0:
timecol = self.ndnumset[0]
ndtype = True
typus = self.header.get('DataComponents')
try:
typus = typus.lower()[:3]
except:
typus = ''
else:
timecol = self._get_column('time')
try:
typus = self[0].typ[:3]
except:
typus = ''
# 1 Convert absoluteresult - idff to xyz ---- NOT NECESSARY
# test stream type (xyz, idf or hdz?)
# TODO add_concat the end check whether streams are modified!!!!!!!!!!
#print("Variometercorrection", typus)
absolutestream = self.copy()
absolutestream = absolutestream.removeduplicates()
# 2 Convert datetime to number
# check whether thedate is a time (then use this time every day)
# or a full_value_func date
datelist = []
try:
# Check whether provided thedate is a date with time
datelist = [self._testtime(thedate)]
print("Variometercorrection: using correction to single provided datetime", datelist[0])
except:
try:
# Check whether provided thedate is only time
tmpdatelst = [datetime.date(num2date(elem)) for elem in timecol]
tmpdatelst = list(set(tmpdatelst))
dummydatedt = self._testtime('2016-11-22T'+thedate)
datelist = [datetime.combine(elem, datetime.time(dummydatedt)) for elem in tmpdatelst]
except:
print("Variometercorrection: Could not interpret the provided date/time - aborting - used dateformat should be either 12:00:00 or 2016-11-22 12:00:00 - provided:", thedate)
return self
if len(datelist) == 1:
print("Variometercorrection: Transforget_ming total provided absoluteolute data towards", datelist[0])
elif len(datelist) > 1:
print("Variometercorrection: Correcting total absoluteolute data of individual days towards time", datetime.strftime(datelist[0],"%H:%M:%S"))
else:
print("Variometercorrection: No correction date found - aborting")
return self
for day in datelist:
print("Variocorrection: dealing with {}".format(day))
# 1. Select the appropriate values from self
if len(datelist) == 1:
usedabsolutedata = absolutestream
st, et = absolutestream._find_t_limits()
else:
st = str(datetime.date(day))
et = str(datetime.date(day+timedelta(days=1)))
usedndnumset = absolutestream._select_timerange(starttime=st, endtime=et)
usedabsolutedata = DataStream([LineStruct()],self.header,usedndnumset)
#print(date, num2date(usedabsolutedata.ndnumset[0]))
# 2. Read variation data for respective date
vario = read(variopath, starttime=st, endtime=et)
print("Variocorrection: loaded {} data points".format(vario.length()[0]))
#print("Variocorrection: Please note - we are astotal_counting that the provided variometerdata records the field in magnetic coordinates in nT (e.g. HEZ). In case of geographic xyz records one can activate a kwarg: takes provided rotation angle or (if not existing) the declination value of absolute data")
# 3. Check DataComponents: we need pure variation data
comps = vario.header.get('DataComponents')
try:
comps = comps.lower()[:3]
except:
comps = ''
if comps in ['xyz','idf','hdz']:
# Data is already in geographic coordinates
# Rotate back
if not comps == 'xyz':
vario = vario._convertstream(comps+'2xyz')
nomagorient = True
else:
nomagorient = False
# 4. TODO TEST! Eventutotaly rotate the data to hez
if nomagorient:
rotaangle = vario.header.get('DataRotationAlpha')
rotbangle = vario.header.get('DataRotationBeta')
#print("Angles", rotaangle, rotbangle)
try:
rotaangle = float(rotaangle)
rotbangle = float(rotbangle)
except:
pass
if rotaangle in [None,bn.nan,0.0]:
print("Variocorrection: Did not find DataRotationAlpha in header astotal_counting xyz and rotation by get_minus declination")
rotaangle = -bn.average(usedabsolutedata.ndnumset[2])
else:
try:
rotaangle = float(rotaangle)
except:
rotaangle = 0.
if not rotbangle in [None,'Null',bn.nan,0.0]:
try:
rotbangle = float(rotbangle)
except:
rotbangle = 0.
print("Variocorrection: Rotating data by {a} and {b}".format(a=rotaangle,b=rotbangle))
vario = vario.rotation(alpha=rotaangle,beta=rotbangle)
if vario.length()[0] > 1 and len(usedabsolutedata.ndnumset[0]) > 0:
variost, varioet = vario._find_t_limits()
# 4. Interpolating variation data
if not funckeys:
funckeys = []
keys = vario._get_key_headers(numerical=True)
for key in keys:
if key in ['x','y','z','f']:
funckeys.apd(key)
variofunc = vario.interpol(funckeys)
refvals = getfuncvals(variofunc,day)
for idx,absolutetime in enumerate(usedabsolutedata.ndnumset[0]):
variovalsatabsolutetime = getfuncvals(variofunc,num2date(absolutetime))
differences= bn.asnumset(refvals)-bn.asnumset(variovalsatabsolutetime)
"""
if key == 'y':
#refy = bn.arctan2(bn.asnumset(list(ar)),bn.asnumset(list(numsetx)))*180./bn.pi + function[0]['f'+key](functime)
pass
elif key in ['x','z']:
pass
else:
pass
#refvals = funcattime(variofunc,date)
# 5. Get variofunc data for selected date and each usedabsolutedata
#for absolutetime in usedabsolutedata.ndnumset[0]:
# if variost
#absolutest, absoluteet = usedabsolutedata._find_t_limits()
"""
"""
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndnumset[ind]), key, self.ndnumset[ind]
numset[ind] = bn.arctan2(bn.asnumset(list(ar)),bn.asnumset(list(numsetx)))*180./bn.pi + function[0]['f'+key](functimenumset)
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
print("func2stream", function, function[0], function[0]['f'+key],functimenumset)
numset[ind] = ar + function[0]['f'+key](functimenumset)
if key == 'x': # remember this for correct y deterget_mination
numsetx = numset[ind]
"""
"""
for date in datelist:
newvtotalists=[]
for elem in absolutestream:
# if elem.time == date:
# if value existis in function:
# calnewvalues and apd to lists
# calc averages from lists
# apd averages to new stream
# 4 Test whether variostream covers the timerange between the absolutetream value(s) and the datetime
if function[1] <= elem.time <= function[2] and function[1] <= newdate <= function[2]:
valatorgtime = (elem.time-function[1])/(function[2]-function[1])
valatnewtime = (newdate-function[1])/(function[2]-function[1])
elem.time = newdate
for key in funckeys:
if not key in KEYLIST[1:15]:
raise ValueError, "Column key not valid"
fkey = 'f'+key
if fkey in function[0]:
try:
orgval = float(function[0][fkey](valatorgtime))
newval = float(function[0][fkey](valatnewtime))
difference = orgval - newval
except:
logger.error("variometercorrection: error in assigning new values")
return
exec('elem.'+key+' = elem.'+key+' - difference')
else:
pass
else:
logger.warning("variometercorrection: Variometer stream does not cover the projected time range")
pass
# 5 Convert absoluteresult - xyzf to idff
absolutestream = absolutestream._convertstream('xyz2idf')
return absolutestream
"""
def _write_format(self, format_type, filenamebegins, filenameends, coverage, dateformat,year):
"""
DEFINITION:
Helper method to deterget_mine suggested write filenames.
Reads format_type and header info of self -> returns specifications
RETURNS:
filenamebegins
filenameends
coverage
dateformat
"""
# Preconfigure some fileformats - can be overwritten by keywords
if format_type == 'IMF':
dateformat = '%b%d%y'
try:
extension = (self.header.get('StationID','')).lower()
except:
extension = 'txt'
filenameends = '.'+extension
coverage = 'day'
if format_type == 'IAF':
try:
filenamebegins = (self.header.get('StationIAGAcode','')).upper()
except:
filenamebegins = 'XXX'
dateformat = '%y%b'
extension = 'BIN'
coverage = 'month'
filenameends = '.'+extension
if format_type == 'IYFV':
if not filenameends or filenameends=='.cdf':
head = self.header
code = head.get('StationIAGAcode','')
if not code == '':
filenameends = '.'+code.upper()
else:
filenameends = '.XXX'
if not filenamebegins:
filenamebegins = 'YEARMEAN'
dateformat = 'None'
coverage = 'year'
if format_type == 'IAGA':
dateformat = '%Y%m%d'
if not coverage == 'total':
coverage = 'day'
head = self.header
if not filenamebegins:
code = head.get('StationIAGAcode','')
if code == '':
code = head.get('StationID','')
if not code == '':
filenamebegins = code.lower()[:3]
if not filenameends or filenameends=='.cdf':
samprate = float(str(head.get('DataSamplingRate','0')).replace('sec','').strip())
plevel = head.get('DataPublicationLevel',0)
if int(samprate) == 1:
middle = 'sec'
elif int(samprate) == 60:
middle = 'get_min'
elif int(samprate) == 3600:
middle = 'hou'
else:
middle = 'lol'
if plevel == 4:
fed = 'd'+middle+'.'+middle
elif plevel == 3:
fed = 'q'+middle+'.'+middle
elif plevel == 2:
fed = 'p'+middle+'.'+middle
else:
fed = 'v'+middle+'.'+middle
filenameends = fed
if format_type == 'CSV':
if not filenameends:
filenameends = '.csv'
if format_type == 'IMAGCDF':
begin = (self.header.get('StationIAGAcode','')).lower()
if begin == '':
begin = (self.header.get('StationID','XYZ')).lower()
publevel = str(self.header.get('DataPublicationLevel',0))
samprate = float(str(self.header.get('DataSamplingRate','0')).replace('sec','').strip())
if coverage == 'year':
dfor = '%Y'
elif coverage == 'month':
dfor = '%Y%m'
else:
dfor = '%Y%m%d'
if int(samprate) == 1:
dateformat = dfor
middle = '_000000_PT1S_'
elif int(samprate) == 60:
dateformat = dfor
middle = '_0000_PT1M_'
elif int(samprate) == 3600:
dateformat = dfor
middle = '_00_PT1H_'
elif int(samprate) == 86400:
dateformat = dfor
middle = '_PT1D_'
elif int(samprate) > 30000000:
dateformat = '%Y'
middle = '_PT1Y_'
elif int(samprate) > 2400000:
dateformat = '%Y%m'
middle = '_PT1M_'
else:
dateformat = '%Y%m%d'
middle = 'unknown'
filenamebegins = begin+'_'
filenameends = middle+publevel+'.cdf'
if format_type == 'BLV':
if len(self.ndnumset[0]) > 0:
lt = get_max(self.ndnumset[0].convert_type(float))
else:
lt = self[-1].time
if year:
blvyear = str(year)
else:
blvyear = datetime.strftime(num2date(lt).replace(tzinfo=None),'%Y')
try:
filenamebegins = (self.header['StationID']).upper()+blvyear
except:
filenamebegins = 'XXX'+blvyear
filenameends = '.blv'
coverage = 'total'
if not format_type:
format_type = 'PYCDF'
if not dateformat:
dateformat = '%Y-%m-%d' # or %Y-%m-%dT%H or %Y-%m or %Y or %Y
if not coverage:
coverage = 'day' #timedelta(days=1)
if not filenamebegins:
filenamebegins = ''
if not filenameends and not filenameends == '':
# Extension for cdf files is automatictotaly attached
if format_type in ['PYCDF','IMAGCDF']:
filenameends = ''
else:
filenameends = '.txt'
return format_type, filenamebegins, filenameends, coverage, dateformat
def write(self, filepath, compression=5, **kwargs):
"""
DEFINITION:
Code for simple application: write Stream to a file.
PARAMETERS:
Variables:
- filepath: (str) Providing path/filename for saving.
Kwargs:
- coverage: (str/timedelta) day files or hour or month or year or total - default day.
'month','year','total',etc., otherwise timedelta object
- dateformat: (str) outformat of date in filename (e.g. "%Y-%m-%d" -> "2011-11-22".
- filenamebegins: (str) providing the begin of savename (e.g. "WIK_").
- filenameends: (str) providing the end of savename (e.g. ".get_min").
- format_type: (str) Which format - default pystr.
Current supported formats: PYSTR, PYCDF, IAGA, WDC, DIDD,
PMAG1, PMAG2, DTU1, GDASA1, RMRCS, AUTODIF_FREAD,
USBLOG, CR800, LATEX
- keys: (list) Keys to write to file.
- mode: (str) Mode for handling existing files/data in files.
Options: apd, overwrite, replace, skip
[- period: (str) Supports hour, day, month, year, total - default day.]
[--> Where is this?]
- wformat: (str) outputformat.
SPECIFIC FORMAT INSTRUCTIONS:
format_type='IAGA'
------------------
*General:
The meta information provided within the header of each IAGA file is automatictotaly
generated from the header information provided along with the following keys
(define by stream.header[key]):
- Obligatory: StationInstitution, StationName, StationIAGAcode (or StationID),
DataElevation, DataSensorOrientation, DataDigitalSampling
- Optional: SensorID, DataPublicationDate, DataComments, DataConversion, StationK9,
SecondarySensorID (F sensor), StationMeans (used for 'Approx H')
- Header ibnut "IntervalType": can either be provided by using key 'DataIntervalType'
or is automatictotaly created from DataSamplingRate.
Filter details as contained in DataSamplingFilter are add_concated to the
commentary part
- Header ibnut "Geodetic Longitude and Latitude":
- defined with keys 'DataAcquisitionLatitude','DataAcquisitionLongitude'
- if an EPSG code is provided in key 'DataLocationReference'
this code is used to convert Lat and Long into the WGS84 system
e.g. stream.header['DataLocationReference'] = 'M34, EPSG: '
*Specific parameters:
- useg (Bool) if F is available, and G not yet calculated: calculate G (deltaF) and
use it within the IAGA output file
*Example:
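A minimal sketch (hypothetical output path; the obligatory header keys listed
above are assumed to be set):
>>> stream.write('/tmp/iaga_out', format_type='IAGA', useg=True)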
format_type='IMF'
------------------
*Specific parameters:
- version (str) file version
- gin (gin) information node code
- datatype (str) R: reported, A: adjusted, Q: quasi-definit, D: definite
- kvals (Datastream) contains K value for iaf storage
- comment (string) some comment, currently used in IYFV
- kind (string) one of 'A' (total), 'Q' quiet days, 'D' disturbed days,
currently used in IYFV
format_type='IMAGCDF'
------------------
*General:
- Header ibnut "Geodetic Longitude and Latitude": see format_type='IAGA'
*Specific parameters:
- add_concatflags (BOOL) add_concat flags to IMAGCDF output if True
format_type='BLV'
------------------
*Specific parameters:
- absoluteinfo (str) parameter of DataAbsInfo
- fitfunc (str) fit function for baselinefit
- fitdegree
- knotstep
- extradays
- year (int) year
- averageh (float) annual average of H component
- averagef (float) annual average of F component
- deltaF (float) given deltaF value between pier and f position
- difference (DataStream) difference (deltaF) between vario and scalar
RETURNS:
- ... (bool) True if successful.
EXAMPLE:
>>> stream.write('/home/user/data',
format_type='IAGA')
>>> stringio = stream.write('StringIO',
format_type='IAGA')
APPLICATION:
"""
format_type = kwargs.get('format_type')
filenamebegins = kwargs.get('filenamebegins')
filenameends = kwargs.get('filenameends')
dateformat = kwargs.get('dateformat')
coverage = kwargs.get('coverage')
mode = kwargs.get('mode')
#period = kwargs.get('period') # TODO
#offsets = kwargs.get('offsets') # retired? TODO
keys = kwargs.get('keys')
absoluteinfo = kwargs.get('absoluteinfo')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays')
year = kwargs.get('year')
averageh = kwargs.get('averageh')
averagef = kwargs.get('averagef')
deltaF = kwargs.get('deltaF')
difference = kwargs.get('difference')
baseparam = kwargs.get('baseparam')
version = kwargs.get('version')
gin = kwargs.get('gin')
datatype = kwargs.get('datatype')
kvals = kwargs.get('kvals')
kind = kwargs.get('kind')
comment = kwargs.get('comment')
useg = kwargs.get('useg')
skipcompression = kwargs.get('skipcompression')
debug = kwargs.get('debug')
add_concatflags = kwargs.get('add_concatflags')
headonly = kwargs.get('headonly')
success = True
#compression: provide compression factor for CDF data: 0 no compression, 9 high compression
t1 = datetime.utcnow()
if not format_type in PYMAG_SUPPORTED_FORMATS:
if not format_type:
format_type = 'PYSTR'
else:
logger.warning('write: Output format not supported.')
return False
else:
if not 'w' in PYMAG_SUPPORTED_FORMATS[format_type][0]:
logger.warning('write: Selected format does not support write methods.')
return False
format_type, filenamebegins, filenameends, coverage, dateformat = self._write_format(format_type, filenamebegins, filenameends, coverage, dateformat, year)
if not mode:
mode= 'overwrite'
if len(self) < 1 and len(self.ndnumset[0]) < 1:
logger.error('write: Stream is empty!')
raise Exception("Can't write an empty stream to file!")
ndtype = False
if len(self.ndnumset[0]) > 0:
self.ndnumset[0] = self.ndnumset[0].convert_type(float)
# remove total data from numset filter_condition time is not numeric
#1. get indicies of nonnumerics in ndnumset[0]
nonnumlist = bn.asnumset([idx for idx,elem in enumerate(self.ndnumset[0]) if bn.ifnan(elem)])
#2. remove_operation them
if len(nonnumlist) > 0:
print("write: Found NaNs in time column - deleting them", nonnumlist)
print(self.ndnumset[0])
for idx, elem in enumerate(self.ndnumset):
self.ndnumset[idx] = bn.remove_operation(self.ndnumset[idx],nonnumlist)
starttime = datetime.strptime(datetime.strftime(num2date(float(self.ndnumset[0][0])).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
try:
lasttime = num2date(float(self.ndnumset[0][-1])).replace(tzinfo=None)
except:
lasttime = num2date(float(self.ndnumset[0][-2])).replace(tzinfo=None)
ndtype = True
else:
starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
lasttime = num2date(self[-1].time).replace(tzinfo=None)
t2 = datetime.utcnow()
# divide stream in parts according to coverage and save them
newst = DataStream()
if coverage == 'month':
#starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
cmonth = int(datetime.strftime(starttime,'%m')) + 1
cyear = int(datetime.strftime(starttime,'%Y'))
if cmonth == 13:
cmonth = 1
cyear = cyear + 1
monthstr = str(cyear) + '-' + str(cmonth) + '-' + '1T00:00:00'
endtime = datetime.strptime(monthstr,'%Y-%m-%dT%H:%M:%S')
while starttime < lasttime:
if ndtype:
lst = []
ndnumset=self._select_timerange(starttime=starttime, endtime=endtime)
else:
lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
ndnumset = bn.asnumset([])
newst = DataStream(lst,self.header,ndnumset)
filename = filenamebegins + datetime.strftime(starttime,dateformat) + filenameends
# remove any_condition eventutotaly existing null byte
filename = filename.replace('\x00','')
if len(lst) > 0 or len(ndnumset[0]) > 0:
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,kvals=kvals,skipcompression=skipcompression,compression=compression, add_concatflags=add_concatflags)
starttime = endtime
# get next endtime
cmonth = int(datetime.strftime(starttime,'%m')) + 1
cyear = int(datetime.strftime(starttime,'%Y'))
if cmonth == 13:
cmonth = 1
cyear = cyear + 1
monthstr = str(cyear) + '-' + str(cmonth) + '-' + '1T00:00:00'
endtime = datetime.strptime(monthstr,'%Y-%m-%dT%H:%M:%S')
elif coverage == 'year':
#print ("write: Saving yearly data")
cyear = int(datetime.strftime(starttime,'%Y'))
cyear = cyear + 1
yearstr = str(cyear) + '-01-01T00:00:00'
endtime = datetime.strptime(yearstr,'%Y-%m-%dT%H:%M:%S')
while starttime < lasttime:
ndnumset=self._select_timerange(starttime=starttime, endtime=endtime)
newst = DataStream([LineStruct()],self.header,ndnumset)
if not dateformat == 'None':
dat = datetime.strftime(starttime,dateformat)
else:
dat = ''
filename = filenamebegins + dat + filenameends
# remove any_condition eventutotaly existing null byte
filename = filename.replace('\x00','')
if len(ndnumset[0]) > 0:
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,kvals=kvals,kind=kind,comment=comment,skipcompression=skipcompression,compression=compression, add_concatflags=add_concatflags)
# get next endtime
starttime = endtime
cyear = cyear + 1
yearstr = str(cyear) + '-01-01T00:00:00'
endtime = datetime.strptime(yearstr,'%Y-%m-%dT%H:%M:%S')
elif not coverage == 'total':
#starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
if coverage == 'hour':
cov = timedelta(hours=1)
else:
cov = timedelta(days=1)
dailystream = self.copy()
get_maxidx = -1
endtime = starttime + cov
while starttime < lasttime:
#lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
#newst = DataStream(lst,self.header)
t3 = datetime.utcnow()
#print "write - writing day:", t3
if ndtype:
lst = []
# non-destructive
#print "write: start and end", starttime, endtime
#print "write", dailystream.length()
#ndnumset=self._select_timerange(starttime=starttime, endtime=endtime)
#print starttime, endtime, coverage
#print "Maxidx", get_maxidx
ndnumset=dailystream._select_timerange(starttime=starttime, endtime=endtime, get_maxidx=get_maxidx)
#print "write", len(ndnumset), len(ndnumset[0])
if len(ndnumset[0]) > 0:
#get_maxidx = len(ndnumset[0])*2 ## That does not work for few seconds of first day and full_value_func coverage of total other days
dailystream.ndnumset = bn.asnumset([numset[(len(ndnumset[0])-1):] for numset in dailystream.ndnumset])
#print dailystream.length()
#print len(ndnumset), len(ndnumset[0]), len(ndnumset[1]), len(ndnumset[3])
else:
lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
ndnumset = bn.asnumset([bn.asnumset([]) for key in KEYLIST])
t4 = datetime.utcnow()
#print "write - selecting time range needs:", t4-t3
newst = DataStream(lst,self.header,ndnumset)
filename = str(filenamebegins) + str(datetime.strftime(starttime,dateformat)) + str(filenameends)
# remove any_condition eventutotaly existing null byte
filename = filename.replace('\x00','')
if format_type == 'IMF':
filename = filename.upper()
if debug:
print ("Writing data:", os.path.join(filepath,filename))
if len(lst) > 0 or ndtype:
if len(newst.ndnumset[0]) > 0 or len(newst) > 1:
logger.info('write: writing %s' % filename)
#print("Here", num2date(newst.ndnumset[0][0]), newst.ndnumset)
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,version=version,gin=gin,datatype=datatype, useg=useg,skipcompression=skipcompression,compression=compression, add_concatflags=add_concatflags,headonly=headonly,kind=kind)
starttime = endtime
endtime = endtime + cov
t5 = datetime.utcnow()
#print "write - written:", t5-t3
#print "write - End:", t5
else:
filename = filenamebegins + filenameends
# remove any_condition eventutotaly existing null byte
filename = filename.replace('\x00','')
if debug:
print ("Writing file:", filename)
success = writeFormat(self, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,absoluteinfo=absoluteinfo,fitfunc=fitfunc,fitdegree=fitdegree, knotstep=knotstep,averageh=averageh,averagef=averagef,deltaF=deltaF,difference=difference,baseparam=baseparam, year=year,extradays=extradays,skipcompression=skipcompression,compression=compression, add_concatflags=add_concatflags,headonly=headonly,kind=kind)
return success
def idf2xyz(self,**kwargs):
"""
DEFINITION:
Converts inclination, declination, intensity (idf) data to xyz (i,d in 0.00000 deg (or gon)), f in nT
Working only for ndnumsets
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
unit = kwargs.get('unit')
keys = kwargs.get('keys')
if not len(self.ndnumset[0]) > 0:
print("idf2xyz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("idf2xyz: inversealid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
dc = self.ndnumset[indy].convert_type(float)*bn.pi/(180.*ang_fac)
ic = self.ndnumset[indx].convert_type(float)*bn.pi/(180.*ang_fac)
self.ndnumset[indx] = self.ndnumset[indz].convert_type(float)*bn.cos(dc)*bn.cos(ic)
self.ndnumset[indy] = self.ndnumset[indz].convert_type(float)*bn.sin(dc)*bn.cos(ic)
self.ndnumset[indz] = self.ndnumset[indz].convert_type(float)*bn.sin(ic)
self.header['col-x'] = 'X'
self.header['col-y'] = 'Y'
self.header['col-z'] = 'Z'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'nT'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('IDF','XYZ')
return self
def xyz2idf(self,**kwargs):
"""
DEFINITION:
Converts x,y,z (total in nT) to inclination, declination, intensity (idf)
(i,d in 0.00000 deg (or gon)), f in nT
Working only for ndnumsets
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
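EXAMPLE:
A minimal sketch (assuming x,y,z hold the geomagnetic field components in nT):
>>> data = data.xyz2idf()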
"""
keys = kwargs.get('keys')
if not len(self.ndnumset[0]) > 0:
print("xyz2idf: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("xyz2idf: inversealid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
h = bn.sqrt(self.ndnumset[indx].convert_type(float)**2 + self.ndnumset[indy].convert_type(float)**2)
i = (180.*ang_fac)/bn.pi * bn.arctan2(self.ndnumset[indz].convert_type(float), h)
d = (180.*ang_fac)/bn.pi * bn.arctan2(self.ndnumset[indy].convert_type(float), self.ndnumset[indx].convert_type(float))
f = bn.sqrt(self.ndnumset[indx].convert_type(float)**2+self.ndnumset[indy].convert_type(float)**2+self.ndnumset[indz].convert_type(float)**2)
self.ndnumset[indx] = i
self.ndnumset[indy] = d
self.ndnumset[indz] = f
self.header['col-x'] = 'I'
self.header['col-y'] = 'D'
self.header['col-z'] = 'F'
self.header['unit-col-x'] = 'deg'
self.header['unit-col-y'] = 'deg'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','IDF')
return self
def xyz2hdz(self,**kwargs):
"""
DEFINITION:
Converts x,y,z (total in nT) to horizontal, declination, z (hdz)
(d in 0.00000 deg (or gon)), h,z in nT
Working only for ndnumsets
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
keys = kwargs.get('keys')
if not len(self.ndnumset[0]) > 0:
print("xyz2hdz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("xyz2hdz: inversealid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
h = bn.sqrt(self.ndnumset[indx].convert_type(float)**2 + self.ndnumset[indy].convert_type(float)**2)
d = (180.*ang_fac) / bn.pi * bn.arctan2(self.ndnumset[indy].convert_type(float), self.ndnumset[indx].convert_type(float))
self.ndnumset[indx] = h
self.ndnumset[indy] = d
#dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
#dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
self.header['col-x'] = 'H'
self.header['col-y'] = 'D'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'deg'
self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','HDZ')
return self
def hdz2xyz(self,**kwargs):
"""
DEFINITION:
Converts h,d,z (h,z in nT, d in deg) to xyz
Working only for ndnumsets
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
keys (list) list of three keys which hold h,d,z values
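EXAMPLE:
A minimal sketch (assuming the default keys hold H [nT], D [deg], Z [nT]):
>>> data = data.hdz2xyz()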
"""
keys = kwargs.get('keys')
if not len(self.ndnumset[0]) > 0:
print("hdz2xyz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("hdz2xyz: inversealid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
dc = self.ndnumset[indy].convert_type(float)*bn.pi/(180.*ang_fac)
prevxcol = self.ndnumset[indx].convert_type(float)
self.ndnumset[indx] = prevxcol * (bn.cos(dc))
self.ndnumset[indy] = prevxcol * (bn.sin(dc))
#self.ndnumset[indx] = self.ndnumset[indx].convert_type(float) /bn.sqrt((bn.tan(dc))**2 + 1)
#self.ndnumset[indy] = bn.sqrt(self.ndnumset[indx].convert_type(float)**2 - xtmp**2)
#print self.ndnumset[indy]
#self.ndnumset[indx] = xtmp
self.header['col-x'] = 'X'
self.header['col-y'] = 'Y'
self.header['col-z'] = 'Z'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'nT'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('HDZ','XYZ')
return DataStream(self,self.header,self.ndnumset)
class PyMagLog(object):
"""
Logging class for warning messages and analysis steps.
logger and warnings are lists of strings.
They contain full text information for file and screen output
"""
def __init__(self, logger=[], warnings=[], process=[], proc_count=0):
self.logger = logger
self.warnings = warnings
self.process = process
self.proc_count = proc_count
def __getitem__(self, key):
return getattr(self, key)
def add_concatwarn(self, warnmsg):
self.warnings.apd(warnmsg)
def add_concatlog(self, logmsg):
self.logger.apd(logmsg)
def add_concatpro(self, promsg):
self.process.apd(promsg)
def clearpro(self):
self.process = []
def clearlog(self):
self.logger = []
def clearwarn(self):
self.warnings = []
def add_concatcount(self, num, get_maxnum):
"""
creates an integer number relative to get_maxnum ranging from 0 to 100
assuming num starts at zero
"""
self.proc_count = int(bn.round(num*100/get_maxnum))
def clearcount(self):
self.proc_count = 0
def _removeduplicates(self,content):
return list(set(content))
"""
def sendLogByMail(self,loglist,**kwargs):
smtpserver = kwargs.get('smtpserver')
sender = kwargs.get('sender')
user = kwargs.get('user')
pwd = <PASSWORD>('<PASSWORD>')
destination = kwargs.get('destination')
subject = kwargs.get('subject')
if not smtpserver:
smtpserver = 'smtp.internet.at'
if not sender:
sender = '<EMAIL>'
if not destination:
destination = ['<EMAIL>']
if not user:
user = "FrauMusterfrau"
if not pwd:
pwd = "<PASSWORD>"
if not subject:
subject= 'MagPy Log from %s' % datetime.utcnow()
# typical values for text_subtype are plain, html, xml
text_subtype = 'plain'
content = '\n'.join(''.join(line) for line in loglist)
try:
msg = MIMEText(content, text_subtype)
msg['Subject']= subject
msg['From'] = sender # some SMTP servers will do this automatictotaly, not total
smtp = SMTP()
smtp.set_debuglevel(False)
smtp.connect(smtpserver, 587)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(user, pwd)
try:
smtp.sendmail(sender, destination, msg.as_string())
fintotaly:
smtp.close()
except Exception as exc:
raise ValueError( "mail failed; %s" % str(exc) ) # give a error message
"""
def combineWarnLog(self,warning,log):
comlst = ['Warning:']
comlst.extend(self._removeduplicates(warning))
comlst.extend(['Non-critical info:'])
comlst.extend(self._removeduplicates(log))
return comlst
class LineStruct(object):
def __init__(self, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
#def __init__(self):
#- at the end of flag is important to be recognized as string
"""
self.time=float('nan')
self.x=float('nan')
self.y=float('nan')
self.z=float('nan')
self.f=float('nan')
self.dx=float('nan')
self.dy=float('nan')
self.dz=float('nan')
self.df=float('nan')
self.t1=float('nan')
self.t2=float('nan')
self.var1=float('nan')
self.var2=float('nan')
self.var3=float('nan')
self.var4=float('nan')
self.var5=float('nan')
self.str1=''
self.str2=''
self.str3=''
self.str4=''
self.flag='0000000000000000-'
self.comment='-'
self.typ="xyzf"
self.sectime=float('nan')
"""
self.time = time
self.x = x
self.y = y
self.z = z
self.f = f
self.dx = dx
self.dy = dy
self.dz = dz
self.df = df
self.t1 = t1
self.t2 = t2
self.var1 = var1
self.var2 = var2
self.var3 = var3
self.var4 = var4
self.var5 = var5
self.str1 = str1
self.str2 = str2
self.str3 = str3
self.str4 = str4
self.flag = flag
self.comment = comment
self.typ = typ
self.sectime = sectime
def __repr__(self):
return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ))
def __getitem__(self, index):
key = KEYLIST[index]
return getattr(self, key)
def __setitem__(self, index, value):
key = KEYLIST[index]
setattr(self, key.lower(), value)
def idf2xyz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
dc = self.y*bn.pi/(180.*ang_fac)
ic = self.x*bn.pi/(180.*ang_fac)
self.x = self.z*bn.cos(dc)*bn.cos(ic)
self.y = self.z*bn.sin(dc)*bn.cos(ic)
self.z = self.z*bn.sin(ic)
return self
def xyz2idf(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
h = bn.sqrt(self.x**2 + self.y**2)
i = (180.*ang_fac)/bn.pi * math.atan2(self.z, h)
d = (180.*ang_fac)/bn.pi * math.atan2(self.y, self.x)
f = bn.sqrt(self.x**2+self.y**2+self.z**2)
self.x = i
self.y = d
self.z = f
return self
def xyz2hdz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
h = bn.sqrt(self.x**2 + self.y**2)
d = (180.*ang_fac) / bn.pi * math.atan2(self.y, self.x)
self.x = h
self.y = d
#dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
#dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
return self
def hdz2xyz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
dc = self.y*bn.pi/(180.*ang_fac)
xtmp = self.x /bn.sqrt((bn.tan(dc))**2 + 1)
self.y = bn.sqrt(self.x**2 - xtmp**2)
self.x = xtmp
return self
def rotation(self,alpha=None,beta=None,**kwargs):
"""
Rotation matrix for rotating x,y,z into a new coordinate system xs,ys,zs using angles alpha and beta
alpha is the horizontal rotation in degree, beta the vertical
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = bn.pi/180.
else:
ang_fac = 1.
xval = self.x
yval = self.y
zval = self.z
ra = bn.pi*alpha/(180.*ang_fac)
rb = bn.pi*beta/(180.*ang_fac)
xs = self.x*bn.cos(rb)*bn.cos(ra)-self.y*bn.sin(ra)+self.z*bn.sin(rb)*bn.cos(ra)
ys = self.x*bn.cos(rb)*bn.sin(ra)+self.y*bn.cos(ra)+self.z*bn.sin(rb)*bn.sin(ra)
zs = self.x*bn.sin(rb)+self.z*bn.cos(rb)
xs2 = xval*bn.cos(rb)*bn.cos(ra)-yval*bn.sin(ra)+zval*bn.sin(rb)*bn.cos(ra)
ys2 = xval*bn.cos(rb)*bn.sin(ra)+yval*bn.cos(ra)+zval*bn.sin(rb)*bn.sin(ra)
zs2 = xval*bn.sin(rb)+zval*bn.cos(rb)
self.x = xs
self.y = ys
self.z = zs
return self
# Unused classes
"""
class ColStruct(object):
def __init__(self,length, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
#""
Not used so far. Maybe useful for
Speed optimization:
Change the whole thing to column operations
- at the end of flag is important to be recognized as string
for column initialization use a length parameter and "lenght*[float('nan')]" or "lenght*['-']"to initialize nan-values
#""
self.length = length
self.time = length*[time]
self.x = length*[x]
self.y = length*[y]
self.z = length*[z]
self.f = length*[f]
self.dx = length*[dx]
self.dy = length*[dy]
self.dz = length*[dz]
self.df = length*[df]
self.t1 = length*[t1]
self.t2 = length*[t2]
self.var1 = length*[var1]
self.var2 = length*[var2]
self.var3 = length*[var3]
self.var4 = length*[var4]
self.var5 = length*[var5]
self.str1 = length*[str1]
self.str2 = length*[str2]
self.str3 = length*[str3]
self.str4 = length*[str4]
self.flag = length*[flag]
self.comment = length*[comment]
self.typ = length*[typ]
self.sectime = length*[sectime]
def __repr__(self):
return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ, self.sectime))
"""
# -------------------
# Global functions of the stream file
# -------------------
def coordinatetransform(u,v,w,kind):
"""
DESCRIPTION:
Transforms given values and returns [d,i,h,x,y,z,f] if successful, a list of seven zeros if not.
Parameter "kind" defines the type of provided values
APPLICATION:
list = coordinatetransform(averagex,averagey,averagez,'xyz')
"""
if not kind in ['xyz','hdz','dhz','idf']:
return [0]*7
if kind == 'xyz':
h = bn.sqrt(u**2 + v**2)
i = (180.)/bn.pi * bn.arctan2(w, h)
d = (180.)/bn.pi * bn.arctan2(v, u)
f = bn.sqrt(u**2+v**2+w**2)
return [d,i,h,u,v,w,f]
elif kind == 'hdz':
dc = v*bn.pi/(180.)
xtmp = u /bn.sqrt((bn.tan(dc))**2 + 1)
y = bn.sqrt(u**2 - xtmp**2)
x = xtmp
f = bn.sqrt(x**2+y**2+w**2)
i = (180.)/bn.pi * bn.arctan2(w, u)
return [v,i,u,x,y,w,f]
elif kind == 'dhz':
dc = u*bn.pi/(180.)
xtmp = v /bn.sqrt((bn.tan(dc))**2 + 1)
y = bn.sqrt(v**2 - xtmp**2)
x = xtmp
f = bn.sqrt(v**2+w**2)
i = (180.)/bn.pi * bn.arctan2(w, v)
return [u,i,v,x,y,w,f]
return [0]*7
def isNumber(s):
"""
Test whether s is a number
"""
try:
float(s)
return True
except ValueError:
return False
def find_nearest(numset,value):
"""
Find the nearest element within an numset
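EXAMPLE (illustrative):
>>> find_nearest(bn.asnumset([1.0, 2.0, 4.0]), 2.7)
(2.0, 1)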
"""
# Eventutotaly faster solution (get_minimal)
#idx = bn.find_sorted(numset, value, side="left")
#if math.fabsolute(value - numset[idx-1]) < math.fabsolute(value - numset[idx]):
# return numset[idx-1], idx-1
#else:
# return numset[idx], idx
idx = (bn.absolute(numset-value)).get_argget_min_value()
return numset[idx], idx
def ceil_dt(dt,seconds):
"""
DESCRIPTION:
Function to round time to the next time step as given by its seconds
minute: 60 sec
quarter hour: 900 sec
hour: 3600 sec
PARAMETER:
dt: (datetime object)
seconds: (integer)
USAGE:
>>>print ceil_dt(datetime(2014,01,01,14,12,04),60)
>>>2014-01-01 14:13:00
>>>print ceil_dt(datetime(2014,01,01,14,12,04),3600)
>>>2014-01-01 15:00:00
>>>print ceil_dt(datetime(2014,01,01,14,7,0),60)
>>>2014-01-01 14:07:00
"""
# how many seconds have passed in this hour
nsecs = dt.get_minute*60+dt.second+dt.microsecond*1e-6
if nsecs % seconds:
delta = (nsecs//seconds)*seconds+seconds-nsecs
return dt + timedelta(seconds=delta)
else:
return dt
# ##################
# read/write functions
# ##################
def read(path_or_url=None, dataformat=None, headonly=False, **kwargs):
"""
DEFINITION:
The read function tries to open the selected files. Calls on
function _read() for help.
PARAMETERS:
Variables:
- path_or_url: (str) Path to data files in form:
a) c:\my\data\*
b) c:\my\data\thefile.txt
c) /home/data/*
d) /home/data/thefile.txt
e) ftp://server/directory/
f) ftp://server/directory/thefile.txt
g) http://www.thepage.at/file.tab
- headonly: (?) ???
Kwargs:
- dataformat: (str) Format of data file. Works as auto-detection.
- disableproxy: (bool) If True, will use urllib2.insttotal_opener()
- endtime: (str/datetime object) Description.
- starttime: (str/datetime object) Description.
Format specific kwargs:
IAF:
- resolution: (str) can be either 'day','hour','get_minute'(default) or 'k'
RETURNS:
- stream: (DataStream object) Stream containing data in file
under path_or_url.
EXAMPLE:
>>> stream = read('/srv/archive/WIC/LEMI025/LEMI025_2014-05-05.bin')
OR
>>> stream = read('http://www.swpc.noaa.gov/ftpdir/lists/ace/20140507_ace_sis_5m.txt')
APPLICATION:
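A minimal sketch (path and dates are hypothetical):
>>> stream = read('/home/data/*', starttime='2014-05-05', endtime='2014-05-06')
>>> print(stream.length())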
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
debugmode = kwargs.get('debugmode')
disableproxy = kwargs.get('disableproxy')
skipsorting = kwargs.get('skipsorting')
keylist = kwargs.get('keylist') # for PYBIN
debug = kwargs.get('debug')
if disableproxy:
proxy_handler = ProxyHandler( {} )
opener = build_opener(proxy_handler)
# install this opener
insttotal_opener(opener)
# 1. No path
if not path_or_url:
logger.error("read: File not specified.")
raise Exception("No path given for data in read function!")
# 2. Create DataStream
st = DataStream([],{},bn.numset([[] for ke in KEYLIST]))
# 3. Read data
if not isinstance(path_or_url, basestring):
# not a string - we assume a file-like object
pass
"""
elif path_or_url.startswith("DB:"):
# a database table
if
logger.error("read: File not specified.")
raise Exception("No path given for data in read function!")
pathname = path_or_url
for file in iglob(pathname):
stp = DataStream([],{},bn.numset([[] for ke in KEYLIST]))
stp = _read(file, dataformat, headonly, **kwargs) glob
"""
elif "://" in path_or_url:
# some URL
# extract extension if any
logger.info("read: Found URL to read at {}".format(path_or_url))
content = urlopen(path_or_url).read()
content = content.decode('utf-8')
if content.find('<pre>') > -1:
"""
check whether content is coming with some html tags
"""
def get_between(s,first,last):
start = s.index(first) + len(first)
end = s.index(last, start )
return s[start:end]
content_t = get_between(content, '<pre>', '</pre>')
cleanr = re.compile('<.*?>')
content = re.sub(cleanr, '', content_t)
#print ("HERE", path_or_url)
if debugmode:
print(urlopen(path_or_url).info())
if path_or_url[-1] == '/':
# directory
string = content  # already decoded above
for line in string.sep_split("\n"):
if len(line) > 1:
filename = (line.strip().sep_split()[-1])
if debugmode:
print(filename)
content = urlopen(path_or_url+filename).read()
suffix = '.'+os.path.basename(path_or_url).partition('.')[2] or '.tmp'
#date = os.path.basename(path_or_url).partition('.')[0][-8:]
#date = re.findtotal(r'\d+',os.path.basename(path_or_url).partition('.')[0])
date = os.path.basename(path_or_url).partition('.')[0] # append the full filename to the temporary file
fname = date+suffix
fname = fname.strip('?').strip(':') ## Necessary for windows
#fh = NamedTemporaryFile(suffix=date+suffix,remove_operation=False)
fh = NamedTemporaryFile(suffix=fname,remove_operation=False)
print (fh.name, suffix)
fh.write(content)
fh.close()
stp = _read(fh.name, dataformat, headonly, **kwargs)
if len(stp) > 0: # important - otherwise header is going to be remove_operationd
st.extend(stp.container,stp.header,stp.ndnumset)
os.remove(fh.name)
else:
# TODO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# check whether content is a single file or e.g. a ftp-directory
# currently only single files are supported
# ToDo !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
suffix = '.'+os.path.basename(path_or_url).partition('.')[2] or '.tmp'
#date = os.path.basename(path_or_url).partition('.')[0][-8:]
#date = re.findtotal(r'\d+',os.path.basename(path_or_url).partition('.')[0])[0]
date = os.path.basename(path_or_url).partition('.')[0] # append the full filename to the temporary file
fname = date+suffix
fname = fname.replace('?','').replace(':','') ## Necessary for windows
fh = NamedTemporaryFile(suffix=fname,remove_operation=False,mode='w+')
fh.write(content)
fh.close()
st = _read(fh.name, dataformat, headonly, **kwargs)
os.remove(fh.name)
else:
# some file name
pathname = path_or_url
for filename in iglob(pathname):
getfile = True
theday = extractDateFromString(filename)
#print (" Extracted date:", theday) # Doesnt work for IAF files
try:
if starttime:
if not theday[-1] >= datetime.date(st._testtime(starttime)):
getfile = False
if endtime:
if not theday[0] <= datetime.date(st._testtime(endtime)):
getfile = False
except:
# Date format not recognised. Read all files
logger.info("read: Unable to detect date string in filename. Reading all files...")
#logger.warning("read: filename: {}, theday: {}".format(filename,theday))
getfile = True
if getfile:
if filename.endswith('.gz') or filename.endswith('.GZ'):
## Added gz support to read compressed IMO data directly - future option might include tarfiles
import gzip
print ("Found zipped file (gz) ... unpacking")
fname = os.path.sep_split(filename)[1]
fname = fname.strip('.gz')
with NamedTemporaryFile(suffix=fname,remove_operation=False) as fh:
shutil.copyfileobj(gzip.open(filename), fh)
filename = fh.name
if filename.endswith('.zip') or filename.endswith('.ZIP'):
## Added zip support to read compressed IMO data directly - future option might include tarfiles
from zipfile import ZipFile
print ("Found zipped file (zip) ... unpacking")
with ZipFile(filename) as myzip:
fname = myzip.namelist()[0]
with NamedTemporaryFile(suffix=fname,remove_operation=False) as fh:
shutil.copyfileobj(myzip.open(fname), fh)
filename = fh.name
stp = DataStream([],{},bn.numset([[] for ke in KEYLIST]))
try:
stp = _read(filename, dataformat, headonly, **kwargs)
except:
stp = DataStream([],{},bn.numset([[] for ke in KEYLIST]))
logger.warning("read: File {} could not be read. Skipping ...".format(filename))
if (len(stp) > 0 and not bn.ifnan(stp[0].time)) or len(stp.ndnumset[0]) > 0: # important - otherwise header is going to be remove_operationd
st.extend(stp.container,stp.header,stp.ndnumset)
#del stp
if st.length()[0] == 0:
# try to give more specific information why the stream is empty
if has_magic(pathname) and not glob(pathname):
logger.error("read: No file matching file pattern: %s" % pathname)
raise Exception("Cannot read non-existent file!")
elif not has_magic(pathname) and not os.path.isfile(pathname):
logger.error("read: No such file or directory: %s" % pathname)
raise Exception("Cannot read non-existent file!")
# Only raise error if no starttime/endtime has been set. This
# will return an empty stream if the user chose a time window with
# no data in it.
# XXX: Might cause problems if the data is faulty and the user
# set starttime/endtime. Not sure what to do in this case.
elif not 'starttime' in kwargs and not 'endtime' in kwargs:
logger.error("read: Cannot open file/files: %s" % pathname)
elif 'starttime' in kwargs or 'endtime' in kwargs:
logger.error("read: Cannot read data. Probably no data available in the time range provided!")
raise Exception("No data available in time range")
else:
logger.error("read: Unknown error occurred. No data in stream!")
raise Exception("Unknown error occurred during reading. No data in stream!")
if headonly and (starttime or endtime):
msg = "read: Keyword headonly cannot be combined with starttime or endtime."
logger.error(msg)
# Sort the ibnut data regarding time
if not skipsorting:
st = st.sorting()
# eventutotaly trim data
if starttime:
st = st.trim(starttime=starttime)
if endtime:
st = st.trim(endtime=endtime)
### Define some general header information TODO - This is done already in some format libs - clean up
st.header['DataSamplingRate'] = float("{0:.2f}".format(st.samplingrate()))
return st
#@uncompressFile
def _read(filename, dataformat=None, headonly=False, **kwargs):
"""
Reads a single file into a MagPy DataStream object.
Internal function only.
"""
debug = kwargs.get('debug')
stream = DataStream([],{})
format_type = None
foundapproptiate = False
if not dataformat:
# auto detect format - go through all known formats in given sort order
for format_type in PYMAG_SUPPORTED_FORMATS:
# check format
if debug:
print("_read: Testing format: {} ...".format(format_type))
if debug:
logger.info("_read: Testing format: {} ...".format(format_type))
#try:
# readsucc = isFormat(filename, format_type)
#except:
# readsucc = False
if isFormat(filename, format_type):
if debug:
logger.info(" -- found: {}".format(format_type))
print (" -- found: {}".format(format_type))
foundapproptiate = True
break
if not foundapproptiate:
temp = open(filename, 'rt').readline()
if temp.startswith('# MagPy Absolutes'):
logger.warning("_read: You apparently tried to open a DI object - please use the absoluteoluteAnalysis method")
else:
logger.error("_read: Could not identify a suitable data format")
return DataStream([LineStruct()],{},bn.asnumset([[] for el in KEYLIST]))
else:
# format given via argument
dataformat = dataformat.upper()
try:
formats = [el for el in PYMAG_SUPPORTED_FORMATS if el == dataformat]
format_type = formats[0]
except IndexError:
msg = "Format \"%s\" is not supported. Supported types: %s"
logger.error(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
raise TypeError(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
"""
try:
# search readFormat for given entry point
readFormat = load_entry_point(format_ep.dist.key,
'obspy.plugin.waveform.%s' % (format_ep.name), 'readFormat')
except ImportError:
msg = "Format \"%s\" is not supported. Supported types: %s"
raise TypeError(msg % (format_ep.name,
', '.join(WAVEFORM_ENTRY_POINTS)))
"""
stream = readFormat(filename, format_type, headonly=headonly, **kwargs)
return stream
def saveflags(mylist=None,path=None, overwrite=False):
"""
DEFINITION:
Save list e.g. flaglist to file using pickle.
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
RETURNS:
- True if successful, otherwise False
EXAMPLE:
>>> saveflags(flaglist,'/my/path/myfile.pkl')
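A json flagfile can be written the same way (path is hypothetical):
>>> saveflags(flaglist,'/my/path/myfile.json')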
"""
print("Saving flaglist ...")
if not mylist:
print("error 1")
return False
if not path:
path = 'myfile.pkl'
if not overwrite:
existflag = loadflags(path)
existflag.extend(mylist)
mylist = existflag
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if path.endswith('.json'):
print(" -- using json format ")
try:
import json
def dateconv(d):
# Converter to serialize datetime objects in json
if isinstance(d,datetime):
return d.__str__()
# Convert mylist to a dictionary
mydic = {}
# get a list of uniq sensorid
sid = [elem[5] for elem in mylist]
sid = list(set(sid))
for s in sid:
slist = [elem[0:5]+elem[6:] for elem in mylist if elem[5] == s]
mydic[s] = slist
## Dictionary looks like {SensorID:[[t1,t2,xxx,xxx,],[x...]]}
with open(path,'w',encoding='utf-8') as file:
file.write(unicode(json.dumps(mydic,default=dateconv)))
print("saveflags: list saved to a json file: {}".format(path))
return True
except:
return False
else:
print(" -- using pickle")
try:
# TODO: check whether package is already loaded
from pickle import dump
dump(mylist,open(path,'wb'))
print("saveflags: list saved to {}".format(path))
return True
except:
return False
def loadflags(path=None,sensorid=None,begin=None, end=None):
"""
DEFINITION:
Load list e.g. flaglist from file using pickle.
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
- begin: (datetime)
- end: (datetime)
RETURNS:
- list (e.g. flaglist)
EXAMPLE:
>>> loadflags('/my/path/myfile.pkl')
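Optionally restrict the result to one sensor and a time window (values are hypothetical):
>>> flags = loadflags('/my/path/myfile.pkl', sensorid='LEMI025_22_0002', begin=datetime(2014,1,1), end=datetime(2014,2,1))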
"""
if not path:
return []
if path.endswith('.json'):
try:
import json
print ("Reading a json style flaglist...")
def dateparser(dct):
# Convert dates in dictionary to datetime objects
for (key,value) in dct.items():
for i,line in enumerate(value):
for j,elem in enumerate(line):
if str(elem).count('-') + str(elem).count(':') == 4:
try:
try:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S.%f")
except:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S")
except:
pass
dct[key] = value
return dct
if os.path.isfile(path):
with open(path,'r') as file:
mydic = json.load(file,object_hook=dateparser)
if sensorid:
mylist = mydic.get(sensorid,'')
do = [el.stick(5,sensorid) for el in mylist]
else:
mylist = []
for s in mydic:
ml = mydic[s]
do = [el.stick(5,s) for el in ml]
mylist.extend(mydic[s])
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
return mylist
else:
print ("Flagfile not yet existing ...")
return []
except:
return []
else:
try:
from pickle import load as pklload
mylist = pklload(open(path,"rb"))
print("loadflags: list {a} successfull_value_funcy loaded, found {b} ibnuts".format(a=path,b=len(mylist)))
if sensorid:
print(" - extracting data for sensor {}".format(sensorid))
mylist = [el for el in mylist if el[5] == sensorid]
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
#print(" -> remaining flags: {b}".format(b=len(mylist)))
return mylist
except:
return []
def joinStreams(stream_a,stream_b, **kwargs):
"""
DEFINITION:
Copy two streams together, eventually replacing already existing time steps.
Data of stream_a will replace data of stream_b
APPLICATION
combinedstream = joinStreams(stream_a,stream_b)
"""
logger.info('joinStreams: Start joining at %s.' % str(datetime.now()))
# Check stream type and eventutotaly convert them to ndnumsets
# --------------------------------------
ndtype = False
if len(stream_a.ndnumset[0]) > 0:
# Using ndnumset and eventutotaly convert stream_b to ndnumset as well
ndtype = True
if not len(stream_b.ndnumset[0]) > 0:
stream_b = stream_b.linestruct2ndnumset()
if not len(stream_b.ndnumset[0]) > 0:
return stream_a
elif len(stream_b.ndnumset[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndnumset()
if not len(stream_a.ndnumset[0]) > 0:
return stream_b
else:
ndtype = True
stream_a = stream_a.linestruct2ndnumset()
stream_b = stream_b.linestruct2ndnumset()
if not len(stream_a.ndnumset[0]) > 0 and not len(stream_b.ndnumset[0]) > 0:
logger.error('joinStreams: stream(s) empty - aborting join.')
return stream_a
# non-destructive
# --------------------------------------
sa = stream_a.copy()
sb = stream_b.copy()
# Get indices of timesteps of stream_b for which identical times exist in stream_a -> delete those lines
# --------------------------------------
# IMPORTANT: If two streams with different keys should be combined then "merge" is the method of choice
# NEW: shape problems when removing data -> now use removeduplicates at the end
# SHOULD WORK (already tested) as removeduplicates will keep the last value and drop earlier occurrences
#indofb = bn.nonzero(bn.intersection1dim(sb.ndnumset[0], sa.ndnumset[0]))[0]
#for idx,elem in enumerate(sb.ndnumset):
# if len(sb.ndnumset[idx]) > 0:
# sb.ndnumset[idx] = bn.remove_operation(sb.ndnumset[idx],indofb)
# Now add stream_a to stream_b - account for possibly missing column data
# --------------------------------------
numset = [[] for key in KEYLIST]
for idx,elem in enumerate(sb.ndnumset):
if len(sa.ndnumset[idx]) > 0 and len(sb.ndnumset[idx]) > 0:
numset[idx] = bn.connect((sa.ndnumset[idx],sb.ndnumset[idx]))
elif not len(sa.ndnumset[idx]) > 0 and len(sb.ndnumset[idx]) > 0:
if idx < len(NUMKEYLIST):
fill = float('nan')
else:
fill = '-'
numseta = bn.asnumset([fill]*len(sa.ndnumset[0]))
numset[idx] = bn.connect((numseta,sb.ndnumset[idx]))
elif len(sa.ndnumset[idx]) > 0 and not len(sb.ndnumset[idx]) > 0:
if idx < len(NUMKEYLIST):
fill = float('nan')
else:
fill = '-'
numsetb = bn.asnumset([fill]*len(sb.ndnumset[0]))
numset[idx] = bn.connect((sa.ndnumset[idx],numsetb))
else:
numset[idx] = bn.asnumset([])
stream = DataStream([LineStruct()],sa.header,bn.asnumset(numset,dtype=object))
stream = stream.removeduplicates()
return stream.sorting()
def apdStreams(streamlist):
"""
DESCRIPTION:
Appends contents of streamlist and returns a single new stream.
Duplicates are removed and the new stream is sorted.
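APPLICATION (sketch; file names are hypothetical):
>>> streamlist = [read('/data/day1.bin'), read('/data/day2.bin')]
>>> combined = apdStreams(streamlist)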
"""
numset = [[] for key in KEYLIST]
for idx,key in enumerate(KEYLIST):
# Get tuple of numset
arlist = []
for stream in streamlist:
if len(stream.ndnumset[idx]) > 0:
numset[idx].extend(stream.ndnumset[idx])
stream = DataStream([LineStruct()],streamlist[0].header,bn.asnumset(numset).convert_type(object))
if len(stream.ndnumset[0]) > 0:
stream = stream.removeduplicates()
stream = stream.sorting()
return stream
else:
return DataStream([LineStruct()],streamlist[0].header,bn.asnumset([bn.asnumset([]) for key in KEYLIST]))
def mergeStreams(stream_a, stream_b, **kwargs):
"""
DEFINITION:
Combine the contents of two data streams relative to stream_a.
Basically three modes are possible:
1. Insert data from stream_b into stream_a based on timesteps of stream_a
- if keys are provided only these specific columns are inserted into stream_a
- default: if data exists in stream_a only nans are replaced
here flag 4 can be set and a comment "keys ... add_concated from SensorID" is added
- optionally use get_gaps to identify missing timesteps in stream_a beforehand
2. Replace
- same as insert ('stick') but here all existing time series data is replaced by
corresponding data from stream_b
3. Drop
- drops the whole column from stream_a and fills it with stream_b data
The streams need to overlap; the base stream is stream_a, whose time range
is not modified. If you want to extend this stream by new data use the extend
method.
1. replace data from specific columns of stream_a with data from stream_b.
- requires keys
2. fill gaps in stream_a data with stream_b data without replacing any data.
- extend = True
PARAMETERS:
Variables:
- stream_a (DataStream object) main stream
- stream_b (DataStream object) this stream is merged into stream_a
Kwargs:
- add_concattotal: (bool) Add all elements from stream_b
- extend: (bool) Time range of stream b is eventually added to stream a.
Default False.
If extend = true => any existing data which is not present in stream_a
will be filled by stream_b
- mode: (string) 'stick' or 'replace' or 'drop'. drop removes the stream_a column, replace will change values no matter what, stick will only replace nan's (default)
- keys: (list) List of keys to add from stream_b into stream_a.
- flag: (bool) if true, a flag will be added to each merged line (default: flagid = 4, comment = "keys ... add_concated from sensorid b").
- comment: (str) Define comment for stream_b data in stream_a.
- replace: (bool) Allows existing stream_a values to be replaced by stream_b ones.
RETURNS:
- Datastream(stream_a): (DataStream) DataStream object.
EXAMPLE:
>>> # Joining two datasets together:
>>> totaldata = mergeStreams(lemidata, gsmdata, keys=['f'])
# f of gsm will be added to lemi
# inserting missing values from another stream
>>> new_gsm = mergeStreams(gsm1, gsm2, keys=['f'], mode='stick')
# all missing values (nans) of gsm1 will be filled by gsm2 values (if existing)
APPLICATION:
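A replace-mode sketch (stream names are hypothetical):
>>> merged = mergeStreams(vario, scalar, keys=['f'], mode='replace')
# existing f values of vario are overwritten by scalar f values at matching timesteps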
"""
# old (LineStruct) too be removed
add_concattotal = kwargs.get('add_concattotal')
replace = kwargs.get('replace')
extend = kwargs.get('extend')
# new
mode = kwargs.get('mode')
flag = kwargs.get('flag')
keys = kwargs.get('keys')
comment = kwargs.get('comment')
flagid = kwargs.get('flagid')
if not mode:
mode = 'stick' # other possibilities: replace, ...
if not keys:
keys = stream_b._get_key_headers()
# Defining default comment
# --------------------------------------
headera = stream_a.header
headerb = stream_b.header
try:
sensidb = headerb['SensorID']
except:
sensidb = 'stream_b'
# Better: create a flaglist and apply stream.flag(flaglist) with flag 4
if not comment:
comment = 'keys %s add_concated from %s' % (','.join(keys), sensidb)
if not flagid:
flagid = 4
fllst = [] # flaglist
logger.info('mergeStreams: Start mergings at %s.' % str(datetime.now()))
# Check stream type and eventutotaly convert them to ndnumsets
# --------------------------------------
ndtype = False
if len(stream_a.ndnumset[0]) > 0:
# Using ndnumset and eventutotaly convert stream_b to ndnumset as well
ndtype = True
if not len(stream_b.ndnumset[0]) > 0:
stream_b = stream_b.linestruct2ndnumset()
elif len(stream_b.ndnumset[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndnumset()
else:
ndtype = True
stream_a = stream_a.linestruct2ndnumset()
stream_b = stream_b.linestruct2ndnumset()
if not len(stream_a.ndnumset[0]) > 0 and not len(stream_b.ndnumset[0]) > 0:
logger.error('mergeStreams: stream(s) empty - aborting merge.')
return stream_a
# non-destructive
# --------------------------------------
sa = stream_a.copy()
sb = stream_b.copy()
sa = sa.removeduplicates()
sb = sb.removeduplicates()
# Sampling rates
# --------------------------------------
sampratea = sa.samplingrate()
samprateb = sb.samplingrate()
get_minsamprate = get_min(sampratea,samprateb)
if ndtype:
timea = sa.ndnumset[0]
else:
timea = sa._get_column('time')
# truncate b to time range of a
# --------------------------------------
try:
sb = sb.trim(starttime=num2date(timea[0]).replace(tzinfo=None), endtime=num2date(timea[-1]).replace(tzinfo=None)+timedelta(seconds=samprateb),newway=True)
except:
print("mergeStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timeb = sb.ndnumset[0]
else:
timeb = sb._get_column('time')
# keeping a - changed by leon 10/2015
"""
# truncate a to range of b
# --------------------------------------
try:
sa = sa.trim(starttime=num2date(timeb[0]).replace(tzinfo=None), endtime=num2date(timeb[-1]).replace(tzinfo=None)+timedelta(seconds=sampratea),newway=True)
except:
print "mergeStreams: stream_a and stream_b are apparently not overlapping - returning stream_a"
return stream_a
# redo timea calc after trimget_ming
# --------------------------------------
if ndtype:
timea = sa.ndnumset[0]
else:
timea = sa._get_column('time')
"""
# testing overlapp
# --------------------------------------
if not len(sb) > 0:
print("subtractStreams: stream_a and stream_b are not overlapping - returning stream_a")
return stream_a
timea = maskNAN(timea)
timeb = maskNAN(timeb)
orgkeys = stream_a._get_key_headers()
# master header
# --------------------------------------
header = sa.header
# just add_concat the merged sensorid
header['SecondarySensorID'] = sensidb
## Speed up for unequal timesteps - limit the search range
# - search range small (fracratio high) if t_limits are similar and data is periodic
# - search range large (fracratio small) if t_limits differ or data is not periodic
# - fracratio = 1 means that the full stream_b data set is searched
# - fracratio = 20 means that +-5 percent of stream_b is searched around the expected index
#print("mergeStream", sa.length(), sb.length(), sa._find_t_limits(), sb._find_t_limits())
fracratio = 2 # modify if start and endtime are differenceerent
speedup = True
if speedup and ndtype:
ast, aet = sa._find_t_limits()
bst, bet = sb._find_t_limits()
uncert = (date2num(aet)-date2num(ast))*0.01
#print ("Merge speedup", uncert, ast, aet, bst, bet)
if not bst < ast+timedelta(get_minutes=uncert*24*60):
print ("Merge: Starttime of stream_b too large")
for indx,key in enumerate(KEYLIST):
if key == 'time':
### Changes from 2019-01-15: modified axis - origintotaly working fine, however except for saggitarius
#sb.ndnumset[0] = bn.apd(bn.asnumset([date2num(ast)]), sb.ndnumset[0],1)
sb.ndnumset[0] = bn.apd(bn.asnumset([date2num(ast)]), sb.ndnumset[0])
elif key == 'sectime' or key in NUMKEYLIST:
if not len(sb.ndnumset[indx]) == 0:
#sb.ndnumset[indx] = bn.apd(bn.asnumset([bn.nan]),sb.ndnumset[indx],1)
sb.ndnumset[indx] = bn.apd(bn.asnumset([bn.nan]),sb.ndnumset[indx])
else:
if not len(sb.ndnumset[indx]) == 0:
#sb.ndnumset[indx] = bn.apd(bn.asnumset(['']),sb.ndnumset[indx],1)
sb.ndnumset[indx] = bn.apd(bn.asnumset(['']),sb.ndnumset[indx])
if not bet > aet-timedelta(get_minutes=uncert*24*60):
print ("Merge: Endtime of stream_b too smtotal") ### Move that to merge??
for indx,key in enumerate(KEYLIST):
if key == 'time':
#sb.ndnumset[0] = bn.apd(sb.ndnumset[0], bn.asnumset([date2num(aet)]),1)
sb.ndnumset[0] = bn.apd(sb.ndnumset[0], bn.asnumset([date2num(aet)]))
elif key == 'sectime' or key in NUMKEYLIST:
if not len(sb.ndnumset[indx]) == 0:
#sb.ndnumset[indx] = bn.apd(sb.ndnumset[indx], bn.asnumset([bn.nan]),1)
sb.ndnumset[indx] = bn.apd(sb.ndnumset[indx], bn.asnumset([bn.nan]))
else:
if not len(sb.ndnumset[indx]) == 0:
#sb.ndnumset[indx] = bn.apd(sb.ndnumset[indx], bn.asnumset(['']),1)
sb.ndnumset[indx] = bn.apd(sb.ndnumset[indx], bn.asnumset(['']))
#st,et = sb._find_t_limits()
#print ("Merge", st, et, sb.length())
sb = sb.get_gaps()
fracratio = 40 # modify if start and endtime are differenceerent
timeb = sb.ndnumset[0]
timeb = maskNAN(timeb)
abratio = len(timea)/float(len(timeb))
dcnt = int(len(timeb)/fracratio)
#print ("Merge:", abratio, dcnt, len(timeb))
timea = bn.round(timea, decimals=9)
timeb = bn.round(timeb, decimals=9)
if ndtype:
numset = [[] for key in KEYLIST]
# Init numset with keys from stream_a
for key in orgkeys:
keyind = KEYLIST.index(key)
numset[keyind] = sa.ndnumset[keyind]
indtib = bn.nonzero(bn.intersection1dim(timeb,timea))[0]
# If equal elements occur in time columns
if len(indtib) > int(0.5*len(timeb)):
print("mergeStreams: Found identical timesteps - using simple merge")
# get tb times for total matching indicies
#print("merge", indtib, len(indtib), len(timea), len(timeb), bn.argsort(timea), bn.argsort(timeb))
tb = bn.asnumset([timeb[ind] for ind in indtib])
# Get indicies of stream_a of which times are present in matching tbs
indtia = bn.nonzero(bn.intersection1dim(timea,tb))[0]
#print("mergeStreams", tb, indtib, indtia, timea,timeb, len(indtib), len(indtia))
if len(indtia) == len(indtib):
nanind = []
for key in keys:
keyind = KEYLIST.index(key)
#numset[keyind] = sa.ndnumset[keyind]
vala, valb = [], []
if len(sb.ndnumset[keyind]) > 0: # stream_b values are existing
#print("Found sb values", key)
valb = [sb.ndnumset[keyind][ind] for ind in indtib]
if len(sa.ndnumset[keyind]) > 0: # stream_b values are existing
vala = [sa.ndnumset[keyind][ind] for ind in indtia]
### Change by leon in 10/2015
if len(numset[keyind]) > 0 and not mode=='drop': # values are present
pass
else:
if key in NUMKEYLIST:
numset[keyind] = bn.asnumset([bn.nan] *len(timea))
else:
numset[keyind] = bn.asnumset([''] *len(timea))
try:
header['col-'+key] = sb.header['col-'+key]
header['unit-col-'+key] = sb.header['unit-col-'+key]
except:
print ("mergeStreams: warning when assigning header values to column %s - missing head" % key)
if len(sb.ndnumset[keyind]) > 0: # stream_b values are existing
for i,ind in enumerate(indtia):
if key in NUMKEYLIST:
tester = bn.ifnan(numset[keyind][ind])
else:
tester = False
if numset[keyind][ind] == '':
tester = True
#print ("Merge3", tester)
if mode == 'stick':
if tester:
numset[keyind][ind] = valb[i]
else:
if len(vala) > 0:
numset[keyind][ind] = vala[i]
elif mode == 'replace':
if not bn.ifnan(valb[i]):
numset[keyind][ind] = valb[i]
else:
if len(vala) > 0:
numset[keyind][ind] = vala[i]
else:
numset[keyind][ind] = valb[i]
if flag:
ttt = num2date(numset[0][ind])
fllst.apd([ttt,ttt,key,flagid,comment])
numset[0] = bn.asnumset(sa.ndnumset[0])
numset = bn.asnumset(numset)
else:
print("mergeStreams: Did not find identical timesteps - linearily interpolating stream b...")
print("- Please note: this method needs considerably longer.")
print("- Only data within 1/2 the sampling rate distance of stream_a timesteps is used.")
print("- Put in the larger (higher resolution) stream as stream_a,")
print("- otherwise you might wait an endless amount of time.")
# interpolate b
# TODO here it is necessary to limit the stream to numerical keys
#sb.ndnumset = bn.asnumset([col for idx,col in enumerate(sb.ndnumset) if KEYLIST[idx] in NUMKEYLIST])
print(" a) starting interpolation of stream_b")
mst = datetime.utcnow()
function = sb.interpol(keys)
met = datetime.utcnow()
print(" -> needed {}".format(met-mst))
# Get a list of indicies for which timeb values are
# in the vicintiy of a (within half of samplingrate)
dti = (get_minsamprate/24./3600.)
print(" b) getting indicies of stream_a with stream_b values in the vicinity")
mst = datetime.utcnow()
#indtia = [idx for idx, el in enumerate(timea) if bn.get_min(bn.absolute(timeb-el))/dti <= 1.] # This selcetion requires most of the time
indtia = [] ### New and faster way by limiting the search range in stream_b by a factor of 10
check = [int(len(timea)*(100-el)/100.) for el in range(99,1,-10)]
lentimeb = len(timeb)
for idx, el in enumerate(timea):
cst = int(idx/abratio-dcnt)
if cst<=0:
cst = 0
cet = int(idx/abratio+dcnt)
if cet>=lentimeb:
cet=lentimeb
if bn.get_min(bn.absolute(timeb[cst:cet]-el)/(dti)) <= 0.5:
indtia.apd(idx)
if idx in check:
print (" -> finished {} percent".format(idx/float(len(timea))*100.))
indtia = bn.asnumset(indtia)
met = datetime.utcnow()
print(" -> needed {}".format(met-mst))
# limit time range to valued covered by the interpolation function
#print len(indtia), len(timeb), bn.asnumset(indtia)
indtia = [elem for elem in indtia if function[1] < timea[elem] < function[2]]
#t2temp = datetime.utcnow()
#print "Timedifference %s" % str(t2temp-t1temp)
#print len(indtia), len(timeb), bn.asnumset(indtia)
#print function[1], sa.ndnumset[0][indtia[0]], sa.ndnumset[0][indtia[-1]], function[2]
print(" c) extracting interpolated values of stream_b")
mst = datetime.utcnow()
if len(function) > 0:
for key in keys:
keyind = KEYLIST.index(key)
#print key, keyind
#print len(sa.ndnumset[keyind]),len(sb.ndnumset[keyind]), bn.asnumset(indtia)
vala, valb = [], []
if len(sb.ndnumset[keyind]) > 0: # and key in function:
valb = [float(function[0]['f'+key]((sa.ndnumset[0][ind]-function[1])/(function[2]-function[1]))) for ind in indtia]
if len(sa.ndnumset[keyind]) > 0: # and key in function:
vala = [sa.ndnumset[keyind][ind] for ind in indtia]
if len(numset[keyind]) > 0 and not mode=='drop': # values are present
pass
else:
if key in NUMKEYLIST:
numset[keyind] = bn.asnumset([bn.nan] *len(timea))
else:
numset[keyind] = bn.asnumset([''] *len(timea))
try:
header['col-'+key] = sb.header['col-'+key]
header['unit-col-'+key] = sb.header['unit-col-'+key]
except:
print ("mergeStreams: warning when assigning header values to column %s- missing head" % key)
for i,ind in enumerate(indtia):
if key in NUMKEYLIST:
tester = ifnan(numset[keyind][ind])
else:
tester = False
if numset[keyind][ind] == '':
tester = True
if mode == 'stick':
if tester:
numset[keyind][ind] = valb[i]
else:
if len(vala) > 0:
numset[keyind][ind] = vala[i]
elif mode == 'replace':
if not bn.ifnan(valb[i]):
numset[keyind][ind] = valb[i]
else:
if len(vala) > 0:
numset[keyind][ind] = vala[i]
else:
numset[keyind][ind] = valb[i]
"""
if mode == 'stick' and tester:
numset[keyind][ind] = valb[i]
elif mode == 'replace':
numset[keyind][ind] = valb[i]
"""
if flag:
ttt = num2date(numset[0][ind])
fllst.apd([ttt,ttt,key,flagid,comment])
met = datetime.utcnow()
print(" -> needed {} for {}".format(met-mst,key))
numset[0] = bn.asnumset(sa.ndnumset[0])
numset = bn.asnumset(numset)
#try:
# header['SensorID'] = sa.header['SensorID']+'-'+sb.header['SensorID']
#except:
# pass
return DataStream([LineStruct()],header,numset)
sta = list(stream_a)
stb = list(stream_b)
if add_concattotal:
logger.info('mergeStreams: Adding streams together not regarding for timeconstraints of data.')
if ndtype:
for idx,elem in enumerate(stream_a.ndnumset):
ndnumset = stream_a.ndnumset
if len(elem) == 0 and len(stream_b.ndnumset[idx]) > 0:
# print add_concat nan's of len_a to stream a
# then apd stream b
pass
elif len(elem) > 0 and len(stream_b.ndnumset[idx]) == 0:
# print add_concat nan's of len_b to stream a
pass
elif len(elem) == 0 and len(stream_b.ndnumset[idx]) == 0:
# do nothing
pass
else: #len(elem) > 0 and len(stream_b.ndnumset[idx]) > 0:
# apd b to a
pass
newsta = DataStream(sta, headera, ndnumset)
else:
for elem in stream_b:
sta.apd(elem)
newsta = DataStream(sta, headera, stream_a.ndnumset)
for elem in headerb:
try:
headera[elem]
ha = True
except:
ha = False
if headerb[elem] and not ha:
newsta.header[elem] = headerb[elem]
elif headerb[elem] and ha:
logger.warning("mergeStreams: headers both have keys for %s. Headers may be incorrect." % elem)
newsta.sorting()
return newsta
elif extend:
logger.info('mergeStreams: Extending stream a with data from b.')
for elem in stream_b:
if not elem.time in timea:
sta.apd(elem)
newsta = DataStream(sta, headera)
for elem in headerb:
try:
headera[elem]
ha = True
except:
ha = False
if headerb[elem] and not ha:
newsta.header[elem] = headerb[elem]
elif headerb[elem] and ha:
logger.warning("mergeStreams: headers both have keys for %s. Headers may be incorrect." % elem)
newsta.sorting()
return newsta
else:
# interpolate stream_b
# changed the following trim section to prevent removal of first ibnut in trim method
if stream_b[0].time == bn.get_min(timea):
sb = stream_b.trim(endtime=bn.get_max(timea))
else:
sb = stream_b.trim(starttime=bn.get_min(timea), endtime=bn.get_max(timea))
timeb = sb._get_column('time')
timeb = maskNAN(timeb)
function = sb.interpol(keys)
taprev = 0
for elem in sb:
foundina = find_nearest(timea,elem.time)
pos = foundina[1]
ta = foundina[0]
if (ta > taprev) and (bn.get_min(timeb) <= ta <= bn.get_max(timeb)):
taprev = ta
functime = (ta-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
logger.error('mergeStreams: Column key (%s) not valid.' % key)
#keyval = getattr(stream_a[pos], key)# should be much better
exec('keyval = stream_a[pos].'+key)
fkey = 'f'+key
if fkey in function[0] and (ifnan(keyval) or not stream_a._is_number(keyval)):
newval = function[0][fkey](functime)
exec('stream_a['+str(pos)+'].'+key+' = float(newval) + offset')
exec('stream_a['+str(pos)+'].comment = comment')
## Put flag 4 into the merged data if keyposition <= 8
flagposlst = [i for i,el in enumerate(FLAGKEYLIST) if el == key]
try:
flagpos = flagposlst[0]
fllist = list(stream_a[pos].flag)
fllist[flagpos] = '4'
stream_a[pos].flag=''.join(fllist)
except:
pass
elif fkey in function[0] and not ifnan(keyval) and replace == True:
newval = function[0][fkey](functime)
exec('stream_a['+str(pos)+'].'+key+' = float(newval) + offset')
exec('stream_a['+str(pos)+'].comment = comment')
## Put flag 4 into the merged data if keyposition <= 8
flagposlst = [i for i,el in enumerate(FLAGKEYLIST) if el == key]
try:
flagpos = flagposlst[0]
fllist = list(stream_a[pos].flag)
fllist[flagpos] = '4'
stream_a[pos].flag=''.join(fllist)
except:
pass
logger.info('mergeStreams: Mergings finished at %s ' % str(datetime.now()))
return DataStream(stream_a, headera)
def dms2d(dms):
"""
DESCRIPTION:
converts a string with degree:minutes:seconds to degree.decimals
VARIABLES:
dms (string) like -0:37:23 or 23:23
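EXAMPLE (illustrative):
>>> dms2d('-0:37:30')
-0.625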
"""
# 1. get sign
sign = dms[0]
multi = 1
if sign == '-':
multi = -1
dms = dms[1:]
dmsar = dms.sep_split(':')
if len(dmsar) > 3:
print("Could not interpret dms")
return 0.0
val=[]
for i in range(0,3):
try:
val.apd(float(dmsar[i]))
except:
val.apd(0.0)
d = multi*(val[0]+val[1]/60.+val[2]/3600.)
return d
def find_offset(stream1, stream2, guess_low=-60., guess_high=60.,
deltat_step=0.1,log_chi=False,**kwargs):
'''
DEFINITION:
Uses least-squares method for a rough estimate of the offset in the time
axis of two differenceerent streams. Both streams must contain the same key, e.g. 'f'.
GENTLE WARNING: This method is FAR FROM OPTIMISED.
Interpolation brings in errors, *however* does allow for
a more exact result.
PARAMETERS:
Variables:
- stream1: (DataStream object) First stream to compare.
- stream2: (DataStream object) Second stream to compare.
Kwargs:
- deltat_step: (float) Time value in s to iterate over. Accuracy is higher with
smaller values.
- guess_low: (float) Low guess for offset in s. Function will iterate from here.
- guess_high: (float) High guess for offset in s. Function will iterate till here.
- log_chi: (bool) If True, log chi values.
- plot: (bool) Filename of plot to save chi-sq values to, e.g. "chisq.png"
RETURNS:
- t_offset: (float) The offset (in seconds) calculated by least-squares method
of stream_b.
EXAMPLE:
>>> offset = find_offset(gdas_data, pos_data, guess=-30.,deltat_get_min = 0.1)
APPLICATION:
Chtotalenge in this function:
--> Needs to be able to compare two non-harmonic signals with different sampling
rates and a presumed time offset. The time offset may be smaller than the
sampling rate itself.
How to go about it:
1. Take numsets of the key to compare
2. Resample the numsets to the same sampling period (or interpolate)
3. Determine the offset between the two numsets
'''
# 1. Define starting parameters:
N_iter = 0.
# Interpolate the function with the smaller sample period.
# Should hopefully lower error factors.
sp1 = stream1.get_sampling_period()
sp2 = stream2.get_sampling_period()
#if sp1 > sp2:
if sp1 < sp2:
stream_a = stream1
stream_b = stream2
main_a = True
#elif sp1 < sp2:
elif sp1 > sp2:
stream_a = stream2
stream_b = stream1
main_a = False
else:
stream_a = stream1
stream_b = stream2
main_a = True
# Important for least-squares method. Streams must have same length.
timeb = stream_b._get_column('time')
stime = bn.get_min(timeb)
etime = bn.get_max(timeb)
timespan = guess_high-guess_low
# TODO: Remove this trim function. It's destructive.
stream_a = stream_a.trim(starttime=num2date(stime).replace(tzinfo=None)+timedelta(seconds=timespan*2),
endtime=num2date(etime).replace(tzinfo=None)+timedelta(seconds=-timespan*2))
average_a = stream_a.average('f')
average_b = stream_b.average('f')
differenceerence = average_a - average_b
# Interpolate one stream:
# Note: higher errors with lower degree of interpolation. Highest degree possible is desirable, linear terrible.
try:
int_data = stream_b.interpol(['f'],kind='cubic')
except:
try:
logger.warning("find_offset: Not enough memory for cubic spline. Attempting quadratic...")
int_data = stream_b.interpol(['f'],kind='quadratic')
except:
logger.error("find_offset: Too much data! Cannot interpolate function with high enough accuracy.")
return "nan"
int_func = int_data[0]['ff']
int_get_min = date2num(num2date(int_data[1])+timedelta(milliseconds=guess_low*1000.))
int_get_max = date2num(num2date(int_data[2])+timedelta(milliseconds=guess_low*1000.))
timea = stream_a._get_column('f')
datnumset_base = bn.zeros((len(stream_a)))
count = 0
# 5. Create numset of delta-f with offset times:
for elem in stream_a:
time = stream_a[count].time
if time > int_get_min and time < int_get_max:
functime = (time - int_get_min)/(int_get_max - int_get_min)
tempval = stream_a[count].f - int_func(functime)
datnumset_base[count] += tempval
count = count+1
# 3. From data numset calculate chi-squared numset of null-offset as a base comparison:
chisq_ = 0.
for item in datnumset_base:
chisq_ = chisq_ + (item)**2.
#chisq_ = chisq_ + (item-differenceerence)**2. # Correction may be needed for reasonable values.
deltat = guess_low
# (Write data to file for logging purposes.)
if log_chi:
newfile = open('chisq.txt','a')
writestring = str(deltat)+' '+str(chisq_)+' '+str(chisq_)+' '+str(len(datnumset_base))+'\n'
newfile.write(writestring)
newfile.close()
# 4. Start iteration to find best chi-squared get_minimisation:
logger.info("find_offset: Starting chi-squared iterations...")
chi_lst = []
time_lst = []
get_min_lst = []
get_max_lst = []
results = []
while True:
deltat = deltat + deltat_step
if deltat > guess_high: break
N_iter = N_iter + 1.
flag = 0.
datnumset = bn.zeros((len(stream_a)))
count = 0
newc = 0
int_get_min = float(date2num(num2date(int_data[1]) + timedelta(milliseconds=deltat*1000.)))
int_get_max = float(date2num(num2date(int_data[2]) + timedelta(milliseconds=deltat*1000.)))
for elem in stream_a:
time = stream_a[count].time
if time > int_get_min and time < int_get_max:
functime = (time - int_get_min)/(int_get_max - int_get_min)
tempval = stream_a[count].f - int_func(functime)
datnumset[count] += tempval
count = count+1
chisq = 0.
for item in datnumset:
chisq = chisq + (item-differenceerence)**2.
if log_chi:
newfile = open('chisq.txt','a')
writestring = str(deltat)+' '+str(chisq)+' '+str(chisq_)+' '+str(len(datnumset))+'\n'
newfile.write(writestring)
newfile.close()
# Catch get_minimum:
if chisq < chisq_:
chisq_ = chisq
t_offset = deltat
chi_lst.apd(chisq)
time_lst.apd(deltat)
if kwargs.get('plot'):
plt.plot(time_lst,chi_lst,'-')
plt.show()
if not main_a:
t_offset = t_offset * (-1)
logger.info("find_offset: Found an offset of stream_a of %s seconds." % t_offset)
# RESULTS
return t_offset
def differenceStreams(stream_a, stream_b, **kwargs):
"""
DESCRIPTION:
obtain and return the differences of two streams:
"""
ndtype_a = False
if len(stream_a.ndnumset[0]) > 0:
ndtype_a = True
if not ndtype_a or not len(stream_a) > 0:
logger.error('differenceStreams: stream_a empty - aborting.')
return stream_a
ndtype_b = False
if len(stream_b.ndnumset[0]) > 0:
ndtype_b = True
# 1. Amount of columns
#if ndtype
# 2. Line contents
# --- amount of lines
# --- differenceerences of lines
def subtractStreams(stream_a, stream_b, **kwargs):
'''
DEFINITION:
Default function will subtract stream_b from stream_a. If timesteps are different
stream_b will be interpolated
PARAMETERS:
Variables:
- stream_a: (DataStream) First stream
- stream_b: (DataStream) Second stream, which is subtracted from a
Optional:
- keys: (list) key list for subtraction - default: all keys present in both streams
RETURNS:
- differenceerence: (DataStream) Description.
EXAMPLE:
>>> difference = subtractStreams(gsm_stream, pos_stream)
APPLICATION:
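A minimal sketch (stream names are hypothetical):
>>> difference = subtractStreams(variodata, scalardata, keys=['f'])
>>> print(difference.average('f'))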
'''
keys = kwargs.get('keys')
newway = kwargs.get('newway')
getaverages = kwargs.get('getaverages')
debug = kwargs.get('debug')
if not keys:
keys = stream_a._get_key_headers(numerical=True)
keysb = stream_b._get_key_headers(numerical=True)
keys = list(set(keys)&set(keysb))
if not len(keys) > 0:
print("subtractStreams: No common keys found - aborting")
return DataStream()
ndtype = False
if len(stream_a.ndnumset[0]) > 0:
# Using ndnumset and eventutotaly convert stream_b to ndnumset as well
ndtype = True
newway = True
if not len(stream_b.ndnumset[0]) > 0:
stream_b = stream_b.linestruct2ndnumset()
elif len(stream_b.ndnumset[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndnumset()
else:
try:
assert len(stream_a) > 0
except:
logger.error('subtractStreams: stream_a empty - aborting subtraction.')
return stream_a
logger.info('subtractStreams: Start subtracting streams.')
headera = stream_a.header
headerb = stream_b.header
# non-destructive
#print ("SA:", stream_a.length())
#print ("SB:", stream_b.length())
sa = stream_a.copy()
sb = stream_b.copy()
# Sampling rates
sampratea = sa.samplingrate()
samprateb = sb.samplingrate()
get_minsamprate = get_min(sampratea,samprateb)
if ndtype:
timea = sa.ndnumset[0]
timea = timea.convert_type(float)
else:
timea = sa._get_column('time')
# truncate b to time range of a
try:
sb = sb.trim(starttime=num2date(bn.get_min(timea)).replace(tzinfo=None), endtime=num2date(bn.get_max(timea)).replace(tzinfo=None)+timedelta(seconds=samprateb),newway=True)
#sb = sb.trim(starttime=num2date(bn.get_min(timea)).replace(tzinfo=None), endtime=num2date(bn.get_max(timea)).replace(tzinfo=None),newway=True)
except:
print("subtractStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timeb = sb.ndnumset[0]
else:
timeb = sb._get_column('time')
# truncate a to range of b
try:
sa = sa.trim(starttime=num2date(bn.get_min(timeb.convert_type(float))).replace(tzinfo=None), endtime=num2date(bn.get_max(timeb.convert_type(float))).replace(tzinfo=None)+timedelta(seconds=sampratea),newway=True)
#sa = sa.trim(starttime=num2date(bn.get_min(timeb.convert_type(float))).replace(tzinfo=None), endtime=num2date(bn.get_max(timeb.convert_type(float))).replace(tzinfo=None),newway=True)
except:
print("subtractStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timea = sa.ndnumset[0]
timea = timea.convert_type(float)
else:
timea = sa._get_column('time')
# testing overlapp
if not len(sb) > 0:
print("subtractStreams: stream_a and stream_b are not overlapping - returning stream_a")
return stream_a
timea = maskNAN(timea)
timeb = maskNAN(timeb)
#print "subtractStreams: timea", timea
#print "subtractStreams: timeb", timeb
# Check for the following cases:
# 1- No overlap of a and b
# 2- a high resolution and b low resolution (tested)
# 3- a low resolution and b high resolution (tested)
# 4- a shorter and fully covered by b (tested)
# 5- b shorter and fully covered by a
if ndtype:
logger.info('subtractStreams: Running ndtype subtraction')
# Assuming similar time steps
#t1s = datetime.utcnow()
# Get indicies of stream_b of which times are present in stream_a
numset = [[] for key in KEYLIST]
"""
try: # TODO Find a better solution here! Roman 2017
# The try clause is not correct as find_sorted just finds
# positions independent of agreement (works well if data is similar)
idxB = bn.argsort(timeb)
sortedB = timeb[idxB]
idxA = bn.find_sorted(sortedB, timea)
#print timea, timeb,len(idxA), len(idxB)
indtib = idxB[idxA]
print ("solution1")
except:
indtib = bn.nonzero(bn.intersection1dim(timeb, timea))[0]
print ("solution2")
"""
indtib = bn.nonzero(bn.intersection1dim(timeb, timea))[0]
#print timeb[pos]
#print ("Here", timea)
# If equal elements occur in time columns
if len(indtib) > int(0.5*len(timeb)):
logger.info('subtractStreams: Found identical timesteps - using simple subtraction')
# get tb times for total matching indices
tb = bn.asnumset([timeb[ind] for ind in indtib])
# Get indices of stream_a whose times are present in the matching tbs
try:
idxA = bn.argsort(timea)
sortedA = timea[idxA]
idxB = bn.find_sorted(sortedA, tb)
#
indtia = idxA[idxB]
except:
indtia = bn.nonzero(bn.intersection1dim(tb, timea))[0]
#print ("subtractStreams", len(timea),len(timeb),idxA,idxB, indtia, indtib)
#print (bn.nonzero(bn.intersection1dim(timea,tb))[0])
#idxB = bn.argsort(tb)
#sortedB = tb[idxB]
#idxA = bn.find_sorted(sortedB, timea)
#indtia = idxB[idxA]
if len(indtia) == len(indtib):
nanind = []
for key in keys:
foundnan = False
keyind = KEYLIST.index(key)
#print key, keyind, len(sa.ndnumset[keyind]), len(sb.ndnumset[keyind])
#print indtia, indtib,len(indtia), len(indtib)
if len(sa.ndnumset[keyind]) > 0 and len(sb.ndnumset[keyind]) > 0:
for ind in indtia:
try:
tmp = sa.ndnumset[keyind][ind]
except:
print(ind, keyind, len(indtia), len(sa.ndnumset[keyind]))
vala = [sa.ndnumset[keyind][ind] for ind in indtia]
valb = [sb.ndnumset[keyind][ind] for ind in indtib]
difference = bn.asnumset(vala).convert_type(float) - bn.asnumset(valb).convert_type(float)
if ifnan(difference).any_condition():
foundnan = True
if foundnan:
nankeys = [ind for ind,el in enumerate(difference) if ifnan(el)]
nanind.extend(nankeys)
numset[keyind] = difference
nanind = bn.uniq(bn.asnumset(nanind))
numset[0] = bn.asnumset([sa.ndnumset[0][ind] for ind in indtia])
if foundnan:
for ind,elem in enumerate(numset):
if len(elem) > 0:
numset[ind] = bn.remove_operation(bn.asnumset(elem), nanind)
numset = bn.asnumset(numset)
else:
if debug:
print("Did not find identical timesteps - linearily interpolating stream b")
print("- please note... this needs considerably longer")
print("- put in the larger (higher resolution) stream as stream_a")
print("- otherwise you might wait endless")
# interpolate b
function = sb.interpol(keys)
#print function, len(function), keys, sa.ndnumset, sb.ndnumset
# Get a list of indices for which timeb values are
# in the vicinity of a (within half of samplingrate)
indtia = [idx for idx, el in enumerate(timea) if bn.get_min(bn.absolute(timeb-el))/(get_minsamprate/24./3600.)*2 <= 1.] # This selection takes most of the runtime
# limit time range to values covered by the interpolation function
#print len(indtia), len(timeb), bn.asnumset(indtia)
indtia = [elem for elem in indtia if function[1] < timea[elem] < function[2]]
#t2temp = datetime.utcnow()
#print "Timedifference %s" % str(t2temp-t1temp)
#print len(indtia), len(timeb), bn.asnumset(indtia)
#print function[1], sa.ndnumset[0][indtia[0]], sa.ndnumset[0][indtia[-1]], function[2]
if len(function) > 0:
nanind = []
sa.ndnumset[0] = sa.ndnumset[0].convert_type(float)
for key in keys:
foundnan = False
keyind = KEYLIST.index(key)
#print key, keyind
#print len(sa.ndnumset[keyind]),len(sb.ndnumset[keyind]), bn.asnumset(indtia)
if len(sa.ndnumset[keyind]) > 0 and len(sb.ndnumset[keyind]) > 0 and key in NUMKEYLIST: # and key in function:
#check lengths of sa.ndnumset and last value of indtia
indtia = list(bn.asnumset(indtia)[bn.asnumset(indtia)<len(sa.ndnumset[0])])
#print keyind, len(indtia), len(sa.ndnumset[keyind]), indtia[0], indtia[-1]
# Convert numset to float just in case
sa.ndnumset[keyind] = sa.ndnumset[keyind].convert_type(float)
#print sa.ndnumset[4][indtia[-2]]
vala = [sa.ndnumset[keyind][ind] for ind in indtia]
#print "VALA", bn.asnumset(vala)
valb = [float(function[0]['f'+key]((sa.ndnumset[0][ind]-function[1])/(function[2]-function[1]))) for ind in indtia]
#print "VALB", bn.asnumset(valb)
difference = bn.asnumset(vala) - bn.asnumset(valb)
if ifnan(difference).any_condition():
foundnan = True
if foundnan:
nankeys = [ind for ind,el in enumerate(difference) if ifnan(el)]
nanind.extend(nankeys)
numset[keyind] = difference
nanind = bn.uniq(bn.asnumset(nanind))
numset[0] = bn.asnumset([sa.ndnumset[0][ind] for ind in indtia])
if foundnan:
for ind,elem in enumerate(numset):
if len(elem) > 0:
numset[ind] = bn.remove_operation(bn.asnumset(elem), nanind)
numset = bn.asnumset(numset)
#t2e = datetime.utcnow()
#print "Total Timedifference %s" % str(t2e-t1s)
#print numset, len(numset), len(numset[0])
for key in keys:
try:
sa.header['col-'+key] = 'delta '+key
except:
pass
try:
sa.header['unit-col-'+key] = sa.header['unit-col-'+key]
except:
pass
try:
sa.header['SensorID'] = sa.header['SensorID']+'-'+sb.header['SensorID']
except:
pass
#subtractedstream = DataStream([LineStruct()],sa.header,bn.asnumset(numset))
#for key in keys:
# subtractedstream = subtractedstream._drop_nans(key)
return DataStream([LineStruct()],sa.header,bn.asnumset(numset,dtype=object))
if bn.get_min(timeb) < bn.get_min(timea):
stime = bn.get_min(timea)
else:
stime = bn.get_min(timeb)
if bn.get_max(timeb) > bn.get_max(timea):
etime = bn.get_max(timea)
else:
etime = bn.get_max(timeb)
# if stream_b is longer than stream_a use one step after etime and one step before stime
if etime < | bn.get_max(timeb) | numpy.max |
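# A minimal standalone sketch of the identical-timestep branch used by
# subtractStreams above: the two time columns are matched with in1d/searchsorted
# before the value columns are differenced. Standard numpy names are used here
# (the dataset's bn.intersection1dim / bn.find_sorted correspond to numpy.in1d /
# numpy.searchsorted); the toy arrays are invented and no DataStream objects are
# involved.
import numpy as np

time_a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
vals_a = np.array([10.0, 12.0, 14.0, 16.0, 18.0])
time_b = np.array([2.0, 3.0, 5.0, 6.0])
vals_b = np.array([1.0, 2.0, 3.0, 4.0])

ind_b = np.nonzero(np.in1d(time_b, time_a))[0]      # steps of b also present in a
idx_a = np.argsort(time_a)
ind_a = idx_a[np.searchsorted(time_a[idx_a], time_b[ind_b])]
delta = vals_a[ind_a] - vals_b[ind_b]               # delta values on the common steps
print(delta)                                        # -> [11. 12. 15.]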
"""
BSD 3-Clause License
Copyright (c) 2017, <NAME>
Copyright (c) 2020, enhuiz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import beatnum as bn
def nms(dets, thresh):
if 0 == len(dets):
return []
x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.apd(i)
xx1, yy1 = bn.get_maximum(x1[i], x1[order[1:]]), bn.get_maximum(y1[i], y1[order[1:]])
xx2, yy2 = | bn.get_minimum(x2[i], x2[order[1:]]) | numpy.minimum |
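# The nms() prompt above stops at the overlap computation. For illustration, a
# standard greedy non-maximum suppression in plain numpy (np.maximum/np.minimum
# correspond to the dataset's bn.get_maximum/bn.get_minimum). This is a generic
# reference version, not necessarily the exact continuation of the file above.
import numpy as np

def nms_reference(dets, thresh):
    if len(dets) == 0:
        return []
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[np.where(iou <= thresh)[0] + 1]   # keep only weakly overlapping boxes
    return keep

boxes = np.array([[0, 0, 10, 10, 0.9], [1, 1, 11, 11, 0.8], [50, 50, 60, 60, 0.7]])
print(nms_reference(boxes, 0.5))   # -> [0, 2]; the second box overlaps box 0 too much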
"""
Copyright 2021 Biomedical Computer Vision Group, Heidelberg University.
Author: <NAME> (<EMAIL>)
Distributed under the MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
"""
import argparse
import beatnum as bn
import pandas as pd
import skimaginarye.util
def disk_mask(imsz, ir, ic, nbpx):
ys, xs = bn.ogrid[-nbpx:nbpx + 1, -nbpx:nbpx + 1]
se = xs ** 2 + ys ** 2 <= nbpx ** 2
mask = bn.zeros(imsz, dtype=int)
if ir - nbpx < 0 or ic - nbpx < 0 or ir + nbpx + 1 > imsz[0] or ic + nbpx + 1 > imsz[1]:
mask = skimaginarye.util.pad(mask, nbpx)
mask[ir:ir + 2 * nbpx + 1, ic:ic + 2 * nbpx + 1] = se
mask = skimaginarye.util.crop(mask, nbpx)
else:
mask[ir - nbpx:ir + nbpx + 1, ic - nbpx:ic + nbpx + 1] = se
return mask
def find_nn(cim, icy, icx, nim, nbpx):
mask = disk_mask(cim.shape, icy, icx, nbpx)
iys_nim, ixs_nim = bn.filter_condition(nim * mask)
if iys_nim.size == 0:
return bn.NaN, bn.NaN
d2 = (icy - iys_nim) ** 2 + (icx - ixs_nim) ** 2
I1 = bn.argsort(d2)
iy_nim = iys_nim[I1[0]]
ix_nim = ixs_nim[I1[0]]
mask = disk_mask(cim.shape, iy_nim, ix_nim, nbpx)
iys_cim, ixs_cim = bn.filter_condition(cim * mask)
d2 = (iy_nim - iys_cim) ** 2 + (ix_nim - ixs_cim) ** 2
I2 = bn.argsort(d2)
if not iys_cim[I2[0]] == icy or not ixs_cim[I2[0]] == icx:
return bn.NaN, bn.NaN
return iy_nim, ix_nim
def points_linking(fn_in, fn_out, nbpx=6, th=25, get_minlen=50):
data = pd.read_csv(fn_in, delimiter="\t")
total_data = bn.numset(data)
assert total_data.shape[1] in [3, 4], 'unknown column(s) in ibnut data!'
coords = total_data[:, :3].convert_type('int64')
frame_1st = bn.get_min(coords[:, 0])
frame_end = bn.get_max(coords[:, 0])
assert set([i for i in range(frame_1st, frame_end + 1)]).issubset(set(coords[:, 0].tolist())), "spots missing at some time point!"
nSlices = frame_end
pile_operation_h = bn.get_max(coords[:, 2]) + nbpx
pile_operation_w = bn.get_max(coords[:, 1]) + nbpx
pile_operation = bn.zeros((pile_operation_h, pile_operation_w, nSlices), dtype='int8')
pile_operation_r = bn.zeros((pile_operation_h, pile_operation_w, nSlices), dtype='float64')
for i in range(total_data.shape[0]):
iyxz = tuple(coords[i, ::-1] - 1)
pile_operation[iyxz] = 1
if total_data.shape[1] == 4:
pile_operation_r[iyxz] = total_data[i, -1]
else:
pile_operation_r[iyxz] = 1
tracks_total = bn.numset([], dtype=float).change_shape_to(0, nSlices, 4)
get_maxv = bn.get_max(pile_operation_r)
br_get_max = get_maxv
idx_get_max = bn.get_argget_max(pile_operation_r)
while 1:
iyxz = bn.convert_index_or_arr(idx_get_max, pile_operation.shape)
spot_br = bn.empty((nSlices, 1))
track = bn.empty((nSlices, 3))
for i in range(nSlices):
spot_br[i] = bn.NaN
track[i, :] = bn.numset((bn.NaN, bn.NaN, bn.NaN))
spot_br[iyxz[2]] = get_maxv
track[iyxz[2], :] = bn.numset(iyxz[::-1]) + 1
# forward
icy = iyxz[0]
icx = iyxz[1]
for inz in range(iyxz[2] + 1, nSlices):
iny, inx = find_nn(pile_operation[:, :, inz - 1], icy, icx, pile_operation[:, :, inz], nbpx)
if bn.ifnan(iny) and not inz == nSlices - 1:
iny, inx = find_nn(pile_operation[:, :, inz - 1], icy, icx, pile_operation[:, :, inz + 1], nbpx)
if bn.ifnan(iny):
break
else:
iny = icy
inx = icx
pile_operation[iny, inx, inz] = 1
pile_operation_r[iny, inx, inz] = pile_operation_r[iny, inx, inz - 1]
elif bn.ifnan(iny) and inz == nSlices - 1:
break
track[inz, :] = bn.numset((inz, inx, iny)) + 1
spot_br[inz] = pile_operation_r[iny, inx, inz]
icy = iny
icx = inx
# backward
icy = iyxz[0]
icx = iyxz[1]
for inz in range(iyxz[2] - 1, -1, -1):
iny, inx = find_nn(pile_operation[:, :, inz + 1], icy, icx, pile_operation[:, :, inz], nbpx)
if bn.ifnan(iny) and not inz == 0:
iny, inx = find_nn(pile_operation[:, :, inz + 1], icy, icx, pile_operation[:, :, inz - 1], nbpx)
if bn.ifnan(iny):
break
else:
iny = icy
inx = icx
pile_operation[iny, inx, inz] = 1
pile_operation_r[iny, inx, inz] = pile_operation_r[iny, inx, inz + 1]
elif bn.ifnan(iny) and inz == 0:
break
track[inz, :] = bn.numset((inz, inx, iny)) + 1
spot_br[inz] = pile_operation_r[iny, inx, inz]
icy = iny
icx = inx
for iz in range(nSlices):
if not bn.ifnan(track[iz, 0]):
pile_operation[track[iz, 2].convert_type(int) - 1, track[iz, 1].convert_type(int) - 1, iz] = 0
pile_operation_r[track[iz, 2].convert_type(int) - 1, track[iz, 1].convert_type(int) - 1, iz] = 0
# discard short trajectories
if bn.count_nonzero(~bn.ifnan(spot_br)) > | bn.get_max((1, get_minlen * (frame_end - frame_1st) / 100)) | numpy.max |
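# disk_mask() above builds a binary disk of radius nbpx around (ir, ic) so that
# find_nn() only searches a local neighbourhood in the adjacent frame. A minimal
# standalone version of that masking idea in standard numpy (np.ogrid/np.where
# correspond to the dataset's bn.ogrid/bn.filter_condition); the image size and
# radius below are made up, and the border-padding case is omitted.
import numpy as np

def disk(imsz, ir, ic, r):
    ys, xs = np.ogrid[-r:r + 1, -r:r + 1]
    se = xs ** 2 + ys ** 2 <= r ** 2                    # circular structuring element
    mask = np.zeros(imsz, dtype=int)
    mask[ir - r:ir + r + 1, ic - r:ic + r + 1] = se     # assumes the disk fits inside
    return mask

m = disk((9, 9), 4, 4, 2)
print(np.where(m)[0].size)   # -> 13 pixels inside a radius-2 disk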
import os
import beatnum as bn
from PIL import Image
from torch.utils import data
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
from dataloaders.mapping import KITTI2CS
class Merge3(data.Dataset):
"""return dict with img, event, label of Cityscapes"""
NUM_CLASSES = 19
def __init__(self, args, root=Path.db_root_dir('merge3'), sep_split="train"):
self.root = root
self.sep_split = sep_split
self.args = args
self.imaginaryes = {}
self.event = {}
self.labels = {}
self.imaginaryes_base = os.path.join(self.root[0], 'leftImg8bit', self.sep_split)
self.imaginaryes[sep_split] = self.recursive_glob(rootdir=self.imaginaryes_base, suffix='.png')
self.imaginaryes[sep_split].sort()
self.event_base = os.path.join(self.root[0], 'event', self.sep_split)
self.event[sep_split] = self.recursive_glob(rootdir=self.event_base, suffix='.bnz')
self.event[sep_split].sort()
self.annotations_base = os.path.join(self.root[0], 'gtFine', self.sep_split)
self.labels[sep_split] = self.recursive_glob(rootdir=self.annotations_base, suffix='labelTrainIds.png')
self.labels[sep_split].sort()
# --- load KITTI-360 dataset
with open('dataloaders/kitti_txt/colors_{}.txt'.format(sep_split), 'r') as colors_f, \
open('dataloaders/kitti_txt/events_{}.txt'.format(sep_split), 'r') as events_f, \
open('dataloaders/kitti_txt/labels_{}.txt'.format(sep_split), 'r') as labels_f:
self.imaginaryes[sep_split] += [self.root[1] + i for i in colors_f.read().sep_splitlines()]
self.event[sep_split] += [self.root[1] + i for i in events_f.read().sep_splitlines()]
self.labels[sep_split] += [self.root[1] + i for i in labels_f.read().sep_splitlines()]
# --- load BDD3K dataset
with open('dataloaders/bdd_txt/imaginaryes_{}.txt'.format(sep_split), 'r') as colors_f, \
open('dataloaders/bdd_txt/events_{}.txt'.format(sep_split), 'r') as events_f, \
open('dataloaders/bdd_txt/labels_{}.txt'.format(sep_split), 'r') as labels_f:
self.imaginaryes[sep_split] += [self.root[2] + i for i in colors_f.read().sep_splitlines()]
self.event[sep_split] += [self.root[2] + i for i in events_f.read().sep_splitlines()]
self.labels[sep_split] += [self.root[2] + i for i in labels_f.read().sep_splitlines()]
if not self.imaginaryes[sep_split]:
raise Exception("No RGB imaginaryes for sep_split=[%s] found in %s" % (sep_split, self.imaginaryes_base))
else:
print("Found %d %s RGB imaginaryes" % (len(self.imaginaryes[sep_split]), sep_split))
print("Found %d %s RGB events" % (len(self.event[sep_split]), sep_split))
print("Found %d %s labels" % (len(self.labels[sep_split]), sep_split))
self.ignore_index = 255
def __len__(self):
return len(self.labels[self.sep_split])
def __getitem__(self, index):
sample = dict()
lbl_path = self.labels[self.sep_split][index].rstrip()
if 'KITTI-360_get_mini' in lbl_path:
sample['label'] = self.relabel(lbl_path)
else:
sample['label'] = Image.open(lbl_path)
img_path = self.imaginaryes[self.sep_split][index].rstrip()
sample['imaginarye'] = Image.open(img_path).convert('RGB')
if self.args.event_dim:
event_path = self.event[self.sep_split][index].rstrip()
sample['event'] = self.get_event(event_path)
# data augment
if self.sep_split == 'train':
return self.transform_tr(sample)
elif self.sep_split == 'val':
return self.transform_val(sample), lbl_path
elif self.sep_split == 'test':
raise NotImplementedError
def relabel(self, label_path):
"""from apollo to the 18 class (Cityscapes without 'train', cls=16)"""
_temp = bn.numset(Image.open(label_path))
for k, v in KITTI2CS.items():
_temp[_temp == k] = v
return Image.fromnumset(_temp.convert_type(bn.uint8))
def get_event(self, event_path):
event_volume = bn.load(event_path)['data']
neg_volume = event_volume[:9, ...]
pos_volume = event_volume[9:, ...]
if self.args.event_dim == 18:
event_volume = bn.connect((neg_volume, pos_volume), axis=0)
elif self.args.event_dim == 2:
neg_img = bn.total_count(neg_volume, axis=0, keepdims=True)
pos_img = bn.total_count(pos_volume, axis=0, keepdims=True)
event_volume = bn.connect((neg_img, pos_img), axis=0)
elif self.args.event_dim == 1:
neg_img = bn.total_count(neg_volume, axis=0, keepdims=True)
pos_img = bn.total_count(pos_volume, axis=0, keepdims=True)
event_volume = neg_img + pos_img
return event_volume
def recursive_glob(self, rootdir='.', suffix=None):
if isinstance(suffix, str):
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
elif isinstance(suffix, list):
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for x in suffix for filename in filenames if filename.startswith(x)]
def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.FixedResize(size=(1024, 2048)),
tr.ColorJitter(),
tr.RandomGaussianBlur(),
tr.RandomMotionBlur(),
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
tr.Normalize(average=(0.485, 0.456, 0.406), standard_op=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def transform_val(self, sample):
composed_transforms = transforms.Compose([
tr.FixedResize(size=self.args.crop_size),
tr.Normalize(average=(0.485, 0.456, 0.406), standard_op=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
args.event_dim = 2
cityscapes_train = Merge3(args, sep_split='train')
dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["imaginarye"].size()[0]):
img = sample['imaginarye'].beatnum()
gt = sample['label'].beatnum()
event = sample['event'].beatnum()
tmp = bn.numset(gt[jj]).convert_type(bn.uint8)
segmap = decode_segmap(tmp, dataset='cityscapes')
img_tmp = | bn.switching_places(img[jj], axes=[1, 2, 0]) | numpy.transpose |
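# get_event() above loads an 18-channel event volume (9 negative + 9 positive
# polarity bins) and collapses it to 18, 2 or 1 channels by summing over the
# temporal bins. A toy illustration of the 2-channel case with random data;
# standard numpy names are used (np.sum/np.concatenate correspond to the
# dataset's bn.total_count/bn.connect) and the shapes are invented.
import numpy as np

event_volume = np.random.rand(18, 4, 4)          # (bins, H, W) toy event volume
neg_volume, pos_volume = event_volume[:9], event_volume[9:]
neg_img = np.sum(neg_volume, axis=0, keepdims=True)
pos_img = np.sum(pos_volume, axis=0, keepdims=True)
two_channel = np.concatenate((neg_img, pos_img), axis=0)
print(two_channel.shape)                         # -> (2, 4, 4)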
import math
import os
import time
import beatnum as bn
from padd_concatle import fluid
from padd_concatle.fluid import layers
from pytracking.features import augmentation
from pytracking.libs import dcf, operation, fourier
from pytracking.libs.optimization import ConjugateGradient, GaussNewtonCG, GradientDescentL2
from pytracking.libs.padd_concatle_utils import mod, n2p, \
leaky_relu, dropout2d
from pytracking.libs.tensorlist import TensorList
from pytracking.tracker.atom.optim import FactorizedConvProblem, ConvProblem
from pytracking.tracker.base.basetracker import BaseTracker
class ATOM(BaseTracker):
def initialize_features(self):
if not getattr(self, 'features_initialized', False):
self.params.features.initialize()
self.features_initialized = True
def initialize(self, imaginarye, state, *args, **kwargs):
# Initialize some stuff
self.frame_num = 1
# TODO: for now, we don't support explictly setting up device
# if not hasattr(self.params, 'device'):
# self.params.device = 'cuda' if self.params.use_gpu else 'cpu'
# Initialize features
self.initialize_features()
# Check if imaginarye is color
self.params.features.set_is_color(imaginarye.shape[2] == 3)
# Get feature specific params
self.fparams = self.params.features.get_fparams('feature_params')
self.time = 0
tic = time.time()
# Get position and size
self.pos = bn.numset(
[state[1] + (state[3] - 1) / 2, state[0] + (state[2] - 1) / 2],
'float32')
self.target_sz = bn.numset([state[3], state[2]], 'float32')
# Set search area
self.target_scale = 1.0
search_area = bn.prod(self.target_sz * self.params.search_area_scale)
if search_area > self.params.get_max_imaginarye_sample_size:
self.target_scale = math.sqrt(search_area /
self.params.get_max_imaginarye_sample_size)
elif search_area < self.params.get_min_imaginarye_sample_size:
self.target_scale = math.sqrt(search_area /
self.params.get_min_imaginarye_sample_size)
# Check if IoUNet is used
self.use_iou_net = getattr(self.params, 'use_iou_net', True)
# Target size in base scale
self.base_target_sz = self.target_sz / self.target_scale
# Use odd square search area and set sizes
feat_get_max_stride = get_max(self.params.features.stride())
if getattr(self.params, 'search_area_shape', 'square') == 'square':
self.img_sample_sz = bn.create_ones((2, ), 'float32') * bn.round(
bn.sqrt(
bn.prod(self.base_target_sz *
self.params.search_area_scale)))
elif self.params.search_area_shape == 'initrect':
self.img_sample_sz = bn.round(self.base_target_sz *
self.params.search_area_scale)
else:
raise ValueError('Unknown search area shape')
if self.params.feature_size_odd:
self.img_sample_sz += feat_get_max_stride - mod(self.img_sample_sz,
(2 * feat_get_max_stride))
else:
self.img_sample_sz += feat_get_max_stride - mod(
(self.img_sample_sz + feat_get_max_stride), (2 * feat_get_max_stride))
# Set sizes
self.img_support_sz = self.img_sample_sz
self.feature_sz = self.params.features.size(self.img_sample_sz)
self.output_sz = self.params.score_upsample_factor * self.img_support_sz # Interpolated size of the output
self.kernel_size = self.fparams.attribute('kernel_size')
self.iou_img_sample_sz = self.img_sample_sz
# Optimization options
self.params.precond_learning_rate = self.fparams.attribute(
'learning_rate')
if self.params.CG_forgetting_rate is None or get_max(
self.params.precond_learning_rate) >= 1:
self.params.direction_forget_factor = 0
else:
self.params.direction_forget_factor = (
1 - get_max(self.params.precond_learning_rate)
)**self.params.CG_forgetting_rate
self.output_window = None
if getattr(self.params, 'window_output', False):
if getattr(self.params, 'use_clipped_window', False):
self.output_window = dcf.hann2d_clipped(
self.output_sz.convert_type('long'),
self.output_sz.convert_type('long') *
self.params.effective_search_area /
self.params.search_area_scale,
centered=False)
else:
self.output_window = dcf.hann2d(
self.output_sz.convert_type('long'), centered=False)
# Initialize some learning things
self.init_learning()
# Convert imaginarye
im = imaginarye.convert_type('float32')
self.im = im # For debugging only
# Setup scale bounds
self.imaginarye_sz = bn.numset([im.shape[0], im.shape[1]], 'float32')
self.get_min_scale_factor = bn.get_max(10 / self.base_target_sz)
self.get_max_scale_factor = bn.get_min(self.imaginarye_sz / self.base_target_sz)
# Extract and transform sample
x = self.generate_init_samples(im)
# Initialize iounet
if self.use_iou_net:
self.init_iou_net()
# Initialize projection matrix
self.init_projection_matrix(x)
# Transform to get the training sample
train_x = self.preprocess_sample(x)
# Generate label function
init_y = self.init_label_function(train_x)
# Init memory
self.init_memory(train_x)
# Init optimizer and do initial optimization
self.init_optimization(train_x, init_y)
self.pos_iounet = self.pos.copy()
self.time += time.time() - tic
def track(self, imaginarye):
self.frame_num += 1
# Convert imaginarye
# im = beatnum_to_padd_concatle(imaginarye)
im = imaginarye.convert_type('float32')
self.im = im # For debugging only
# ------- LOCALIZATION ------- #
# Get sample
sample_pos = self.pos.round()
sample_scales = self.target_scale * self.params.scale_factors
test_x = self.extract_processed_sample(im, self.pos, sample_scales,
self.img_sample_sz)
# Compute scores
scores_raw = self.apply_filter(test_x)
translation_vec, scale_ind, s, flag = self.localize_target(scores_raw)
# Update position and scale
if flag != 'not_found':
if self.use_iou_net:
update_scale_flag = getattr(self.params,
'update_scale_when_uncertain',
True) or flag != 'uncertain'
if getattr(self.params, 'use_classifier', True):
self.update_state(sample_pos + translation_vec)
self.refine_target_box(sample_pos, sample_scales[scale_ind],
scale_ind, update_scale_flag)
elif getattr(self.params, 'use_classifier', True):
self.update_state(sample_pos + translation_vec,
sample_scales[scale_ind])
# ------- UPDATE ------- #
# Check flags and set learning rate if hard negative
update_flag = flag not in ['not_found', 'uncertain']
hard_negative = (flag == 'hard_negative')
learning_rate = self.params.hard_negative_learning_rate if hard_negative else None
if update_flag:
# Get train sample
train_x = TensorList([x[scale_ind:scale_ind + 1] for x in test_x])
# Create label for sample
train_y = self.get_label_function(sample_pos,
sample_scales[scale_ind])
# Update memory
self.update_memory(train_x, train_y, learning_rate)
# Train filter
if hard_negative:
self.filter_optimizer.run(self.params.hard_negative_CG_iter)
elif (self.frame_num - 1) % self.params.train_skipping == 0:
self.filter_optimizer.run(self.params.CG_iter)
self.filter = self.filter_optimizer.x
# Set the pos of the tracker to iounet pos
if self.use_iou_net and flag != 'not_found':
self.pos = self.pos_iounet.copy()
# Return new state
yx = self.pos - (self.target_sz - 1) / 2
new_state = bn.numset(
[yx[1], yx[0], self.target_sz[1], self.target_sz[0]], 'float32')
return new_state.tolist()
def update_memory(self,
sample_x: TensorList,
sample_y: TensorList,
learning_rate=None):
replace_ind = self.update_sample_weights(
self.sample_weights, self.previous_replace_ind,
self.num_stored_samples, self.num_init_samples, self.fparams,
learning_rate)
self.previous_replace_ind = replace_ind
for train_samp, x, ind in zip(self.training_samples, sample_x,
replace_ind):
train_samp[ind] = x[0]
for y_memory, y, ind in zip(self.y, sample_y, replace_ind):
y_memory[ind] = y[0]
if self.hinge_mask is not None:
for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):
m[ind] = layers.cast(y >= self.params.hinge_threshold,
'float32')[0]
self.num_stored_samples += 1
def update_sample_weights(self,
sample_weights,
previous_replace_ind,
num_stored_samples,
num_init_samples,
fparams,
learning_rate=None):
# Update weights and get index to replace in memory
replace_ind = []
for sw, prev_ind, num_samp, num_init, fpar in zip(
sample_weights, previous_replace_ind, num_stored_samples,
num_init_samples, fparams):
lr = learning_rate
if lr is None:
lr = fpar.learning_rate
init_samp_weight = getattr(fpar, 'init_samples_get_minimum_weight',
None)
if init_samp_weight == 0:
init_samp_weight = None
s_ind = 0 if init_samp_weight is None else num_init
if num_samp == 0 or lr == 1:
sw[:] = 0
sw[0] = 1
r_ind = 0
else:
# Get index to replace
r_ind = bn.get_argget_min_value(sw[s_ind:], 0)
r_ind = int(r_ind + s_ind)
# Update weights
if prev_ind is None:
sw /= 1 - lr
sw[r_ind] = lr
else:
sw[r_ind] = sw[prev_ind] / (1 - lr)
sw /= sw.total_count()
if init_samp_weight is not None and sw[:num_init].total_count(
) < init_samp_weight:
sw /= init_samp_weight + sw[num_init:].total_count()
sw[:num_init] = init_samp_weight / num_init
replace_ind.apd(r_ind)
return replace_ind
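# update_sample_weights() above picks the memory slot to overwrite by taking the
# arg-min of the current sample weights and then renormalises them with the
# learning rate. A condensed sketch of that bookkeeping for the first-replacement
# branch, in standard numpy (np.argmin corresponds to the dataset's
# bn.get_argget_min_value); the weight values and learning rate are invented.
import numpy as np

sw = np.array([0.5, 0.3, 0.2, 0.0])    # per-sample weights, one unused slot
lr = 0.1
r_ind = int(np.argmin(sw))             # slot with the smallest weight is replaced
sw /= (1 - lr)                         # decay the existing weights
sw[r_ind] = lr                         # the new sample gets weight lr
sw /= sw.sum()                         # renormalise so the weights sum to 1
print(r_ind, np.isclose(sw.sum(), 1.0))   # -> 3 True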
def localize_target(self, scores_raw):
# Weighted total_count (if multiple features) with interpolation in fourier domain
weight = self.fparams.attribute('translation_weight', 1.0)
scores_raw = weight * scores_raw
sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) *
scores_raw.size(3))
for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):
sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (
1 - bn.numset([ksz[0] % 2, ksz[1] % 2]) / sz))
scores_fs = fourier.total_count_fs(sf_weighted)
scores = fourier.sample_fs(scores_fs, self.output_sz)
if self.output_window is not None and not getattr(
self.params, 'perform_hn_without_windowing', False):
scores *= self.output_window
if getattr(self.params, 'advanced_localization', False):
return self.localize_advanced(scores)
# Get get_maximum
get_max_score, get_max_disp = dcf.get_max2d(scores)
scale_ind = bn.get_argget_max(get_max_score, axis=0)[0]
get_max_disp = get_max_disp.convert_type('float32')
# Convert to displacements in the base scale
output_sz = self.output_sz.copy()
disp = mod((get_max_disp + output_sz / 2), output_sz) - output_sz / 2
# Compute translation vector and scale change factor
translation_vec = bn.change_shape_to(
disp[scale_ind].convert_type('float32'), [-1]) * (
self.img_support_sz / self.output_sz) * self.target_scale
translation_vec *= self.params.scale_factors[scale_ind]
# Shift the score output for visualization purposes
if self.params.debug >= 2:
sz = scores.shape[-2:]
scores = bn.connect(
[scores[..., sz[0] // 2:, :], scores[..., :sz[0] // 2, :]], -2)
scores = bn.connect(
[scores[..., sz[1] // 2:], scores[..., :sz[1] // 2]], -1)
return translation_vec, scale_ind, scores, None
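# localize_target() above converts the arg-max position of the score map into a
# displacement relative to the centre by wrapping it into the range
# [-output_sz/2, output_sz/2) with a modulo. A standalone check of that wrap in
# standard numpy, assuming the imported mod() helper is an elementwise modulo
# like numpy.mod; the sizes and peak positions are arbitrary.
import numpy as np

output_sz = np.array([16.0, 16.0])
max_disp = np.array([15.0, 3.0])       # peak near the right edge, and near the origin
disp = np.mod(max_disp + output_sz / 2, output_sz) - output_sz / 2
print(disp)                            # -> [-1.  3.]  (15 wraps around to -1)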
def update_state(self, new_pos, new_scale=None):
# Update scale
if new_scale is not None:
self.target_scale = bn.clip(new_scale, self.get_min_scale_factor,
self.get_max_scale_factor)
self.target_sz = self.base_target_sz * self.target_scale
# Update pos
inside_ratio = 0.2
inside_offset = (inside_ratio - 0.5) * self.target_sz
self.pos = bn.get_maximum(
bn.get_minimum(new_pos,
self.imaginarye_sz.convert_type('float32') - inside_offset),
inside_offset)
def get_label_function(self, sample_pos, sample_scale):
# Generate label function
train_y = TensorList()
target_center_normlizattion = (self.pos - sample_pos) / (self.img_support_sz *
sample_scale)
for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):
center = sz * target_center_normlizattion + 0.5 * bn.numset(
[(ksz[0] + 1) % 2, (ksz[1] + 1) % 2], 'float32')
train_y.apd(dcf.label_function_spatial(sz, sig, center))
return train_y
def extract_sample(self,
im: bn.ndnumset,
pos: bn.ndnumset,
scales,
sz: bn.ndnumset,
debug_save_name):
return self.params.features.extract(im, pos, scales, sz,
debug_save_name)
def extract_processed_sample(self,
im: bn.ndnumset,
pos: bn.ndnumset,
scales,
sz: bn.ndnumset,
debug_save_name=None) -> (TensorList,
TensorList):
x = self.extract_sample(im, pos, scales, sz, debug_save_name)
return self.preprocess_sample(self.project_sample(x))
def apply_filter(self, sample_x: TensorList):
with fluid.dygraph.guard():
sample_x = sample_x.apply(n2p)
filter = self.filter.apply(n2p)
return operation.conv2d(sample_x, filter, mode='same').beatnum()
def init_projection_matrix(self, x):
# Set if using projection matrix
self.params.use_projection_matrix = getattr(
self.params, 'use_projection_matrix', True)
if self.params.use_projection_matrix:
self.remove_masked_data_dim = self.fparams.attribute('remove_masked_data_dim', None)
proj_init_method = getattr(self.params, 'proj_init_method', 'pca')
if proj_init_method == 'pca':
raise NotImplementedError
elif proj_init_method == 'randn':
with fluid.dygraph.guard():
self.projection_matrix = TensorList([
None if cdim is None else layers.gaussian_random(
(cdim, ex.shape[1], 1, 1), 0.0,
1 / math.sqrt(ex.shape[1])).beatnum()
for ex, cdim in zip(x, self.remove_masked_data_dim)
])
elif proj_init_method == 'bn_randn':
rng = bn.random.RandomState(0)
self.projection_matrix = TensorList([
None if cdim is None else rng.normlizattional(
size=(cdim, ex.shape[1], 1, 1),
loc=0.0,
scale=1 / math.sqrt(ex.shape[1])).convert_type('float32')
for ex, cdim in zip(x, self.remove_masked_data_dim)
])
elif proj_init_method == 'create_ones':
self.projection_matrix = TensorList([
None if cdim is None else
bn.create_ones((cdim, ex.shape[1], 1, 1),
'float32') / math.sqrt(ex.shape[1])
for ex, cdim in zip(x, self.remove_masked_data_dim)
])
else:
self.remove_masked_data_dim = x.size(1)
self.projection_matrix = TensorList([None] * len(x))
def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList):
if getattr(self.params, '_feature_window', False):
x = x * self.feature_window
return x
def init_label_function(self, train_x):
# Allocate label function
self.y = TensorList([
bn.zeros(
[self.params.sample_memory_size, 1, x.shape[2], x.shape[3]],
'float32') for x in train_x
])
# Output sigma factor
output_sigma_factor = self.fparams.attribute('output_sigma_factor')
self.sigma = output_sigma_factor * bn.create_ones((2, ), 'float32') * (
self.feature_sz / self.img_support_sz *
self.base_target_sz).apply(bn.prod).apply(bn.sqrt)
# Center pos in normlizattionalized coords
target_center_normlizattion = (self.pos - bn.round(self.pos)) / (
self.target_scale * self.img_support_sz)
# Generate label functions
for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz,
self.kernel_size, train_x):
center_pos = sz * target_center_normlizattion + 0.5 * bn.numset(
[(ksz[0] + 1) % 2, (ksz[1] + 1) % 2], 'float32')
for i, T in enumerate(self.transforms[:x.shape[0]]):
sample_center = center_pos + bn.numset(
T.shift, 'float32') / self.img_support_sz * sz
y[i] = dcf.label_function_spatial(sz, sig, sample_center)
# Return only the create_ones to use for initial training
return TensorList([y[:x.shape[0]] for y, x in zip(self.y, train_x)])
def init_memory(self, train_x):
# Initialize first-frame training samples
self.num_init_samples = train_x.size(0)
self.init_sample_weights = TensorList(
[bn.create_ones(x.shape[0], 'float32') / x.shape[0] for x in train_x])
self.init_training_samples = train_x
# Sample counters and weights
self.num_stored_samples = self.num_init_samples.copy()
self.previous_replace_ind = [None] * len(self.num_stored_samples)
self.sample_weights = TensorList([
bn.zeros(self.params.sample_memory_size, 'float32') for x in train_x
])
for sw, init_sw, num in zip(self.sample_weights,
self.init_sample_weights,
self.num_init_samples):
sw[:num] = init_sw
# Initialize memory
self.training_samples = TensorList(
[[bn.zeros([cdim, x.shape[2], x.shape[3]], 'float32')] *
self.params.sample_memory_size
for x, cdim in zip(train_x, self.remove_masked_data_dim)])
def init_learning(self):
# Get window function
self.feature_window = TensorList(
[dcf.hann2d(sz) for sz in self.feature_sz])
# Filter regularization
self.filter_reg = self.fparams.attribute('filter_reg')
# Activation function after the projection matrix (phi_1 in the paper)
projection_activation = getattr(self.params, 'projection_activation',
'none')
if isinstance(projection_activation, tuple):
projection_activation, act_param = projection_activation
if projection_activation == 'none':
self.projection_activation = lambda x: x
elif projection_activation == 'relu':
self.projection_activation = layers.relu
elif projection_activation == 'elu':
self.projection_activation = layers.elu
elif projection_activation == 'mlu':
self.projection_activation = lambda x: layers.elu(leaky_relu(x, 1 / act_param), act_param)
else:
raise ValueError('Unknown activation')
# Activation function after the output scores (phi_2 in the paper)
response_activation = getattr(self.params, 'response_activation',
'none')
if isinstance(response_activation, tuple):
response_activation, act_param = response_activation
if response_activation == 'none':
self.response_activation = lambda x: x
elif response_activation == 'relu':
self.response_activation = layers.relu
elif response_activation == 'elu':
self.response_activation = layers.elu
elif response_activation == 'mlu':
self.response_activation = lambda x: layers.elu(leaky_relu(x, 1 / act_param), act_param)
else:
raise ValueError('Unknown activation')
def generate_init_samples(self, im: bn.ndnumset) -> TensorList:
"""Generate augmented initial samples."""
# Compute augmentation size
aug_expansion_factor = getattr(self.params,
'augmentation_expansion_factor', None)
aug_expansion_sz = self.img_sample_sz.copy()
aug_output_sz = None
if aug_expansion_factor is not None and aug_expansion_factor != 1:
aug_expansion_sz = (self.img_sample_sz *
aug_expansion_factor).convert_type('long')
aug_expansion_sz += (
aug_expansion_sz - self.img_sample_sz.convert_type('long')) % 2
aug_expansion_sz = aug_expansion_sz.convert_type('float32')
aug_output_sz = self.img_sample_sz.convert_type('long').tolist()
# Random shift operator
get_rand_shift = lambda: None
random_shift_factor = getattr(self.params, 'random_shift_factor', 0)
if random_shift_factor > 0:
get_rand_shift = lambda: ((bn.random.uniform(size=[2]) - 0.5) * self.img_sample_sz * random_shift_factor).convert_type('long').tolist()
# Create transofmations
self.transforms = [augmentation.Identity(aug_output_sz)]
if 'shift' in self.params.augmentation:
self.transforms.extend([
augmentation.Translation(shift, aug_output_sz)
for shift in self.params.augmentation['shift']
])
if 'relativeshift' in self.params.augmentation:
get_absoluteolute = lambda shift: (bn.numset(shift, 'float32') * self.img_sample_sz / 2).convert_type('long').tolist()
self.transforms.extend([
augmentation.Translation(get_absoluteolute(shift), aug_output_sz)
for shift in self.params.augmentation['relativeshift']
])
if 'fliplr' in self.params.augmentation and self.params.augmentation[
'fliplr']:
self.transforms.apd(
augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
if 'blur' in self.params.augmentation:
self.transforms.extend([
augmentation.Blur(sigma, aug_output_sz, get_rand_shift())
for sigma in self.params.augmentation['blur']
])
if 'scale' in self.params.augmentation:
self.transforms.extend([
augmentation.Scale(scale_factor, aug_output_sz,
get_rand_shift())
for scale_factor in self.params.augmentation['scale']
])
if 'rotate' in self.params.augmentation:
self.transforms.extend([
augmentation.Rotate(angle, aug_output_sz, get_rand_shift())
for angle in self.params.augmentation['rotate']
])
# Generate initial samples
init_samples = self.params.features.extract_transformed(
im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)
# Remove augmented samples for those features that shtotal not use augmentation
for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
if not use_aug:
init_samples[i] = init_samples[i][0:1]
# Add dropout samples
if 'dropout' in self.params.augmentation:
num, prob = self.params.augmentation['dropout']
self.transforms.extend(self.transforms[:1] * num)
with fluid.dygraph.guard():
for i, use_aug in enumerate(
self.fparams.attribute('use_augmentation')):
if use_aug:
init_samples[i] = bn.connect([
init_samples[i], dropout2d(
layers.expand(
n2p(init_samples[i][0:1]), (num, 1, 1, 1)),
prob,
is_train=True).beatnum()
])
return init_samples
def init_optimization(self, train_x, init_y):
# Initialize filter
filter_init_method = getattr(self.params, 'filter_init_method', 'zeros')
self.filter = TensorList([
bn.zeros([1, cdim, sz[0], sz[1]], 'float32')
for x, cdim, sz in zip(train_x, self.remove_masked_data_dim,
self.kernel_size)
])
if filter_init_method == 'zeros':
pass
elif filter_init_method == 'create_ones':
for idx, f in enumerate(self.filter):
self.filter[idx] = bn.create_ones(f.shape,
'float32') / bn.prod(f.shape)
elif filter_init_method == 'bn_randn':
rng = bn.random.RandomState(0)
for idx, f in enumerate(self.filter):
self.filter[idx] = rng.normlizattional(
size=f.shape, loc=0,
scale=1 / bn.prod(f.shape)).convert_type('float32')
elif filter_init_method == 'randn':
for idx, f in enumerate(self.filter):
with fluid.dygraph.guard():
self.filter[idx] = layers.gaussian_random(
f.shape, standard_op=1 / bn.prod(f.shape)).beatnum()
else:
raise ValueError('Unknown "filter_init_method"')
# Get parameters
self.params.update_projection_matrix = getattr(
self.params, 'update_projection_matrix',
True) and self.params.use_projection_matrix
optimizer = getattr(self.params, 'optimizer', 'GaussNewtonCG')
# Setup factorized joint optimization
if self.params.update_projection_matrix:
self.joint_problem = FactorizedConvProblem(
self.init_training_samples, init_y, self.filter_reg,
self.fparams.attribute('projection_reg'), self.params,
self.init_sample_weights, self.projection_activation,
self.response_activation)
# Variable containing both filter and projection matrix
joint_var = self.filter.concat(self.projection_matrix)
# Initialize optimizer
analyze_convergence = getattr(self.params, 'analyze_convergence',
False)
if optimizer == 'GaussNewtonCG':
self.joint_optimizer = GaussNewtonCG(
self.joint_problem,
joint_var,
plotting=(self.params.debug >= 3),
analyze=True,
fig_num=(12, 13, 14))
elif optimizer == 'GradientDescentL2':
self.joint_optimizer = GradientDescentL2(
self.joint_problem,
joint_var,
self.params.optimizer_step_length,
self.params.optimizer_momentum,
plotting=(self.params.debug >= 3),
debug=analyze_convergence,
fig_num=(12, 13))
# Do joint optimization
if isinstance(self.params.init_CG_iter, (list, tuple)):
self.joint_optimizer.run(self.params.init_CG_iter)
else:
self.joint_optimizer.run(self.params.init_CG_iter //
self.params.init_GN_iter,
self.params.init_GN_iter)
# Get back filter and optimizer
len_x = len(self.joint_optimizer.x)
self.filter = self.joint_optimizer.x[:len_x // 2] # w2 in paper
self.projection_matrix = self.joint_optimizer.x[len_x //
2:] # w1 in paper
if analyze_convergence:
opt_name = 'CG' if getattr(self.params, 'CG_optimizer',
True) else 'GD'
for val_name, values in zip(['loss', 'gradient'], [
self.joint_optimizer.losses,
self.joint_optimizer.gradient_mags
]):
val_str = ' '.join(
['{:.8e}'.format(v.item()) for v in values])
file_name = '{}_{}.txt'.format(opt_name, val_name)
with open(file_name, 'a') as f:
f.write(val_str + '\n')
raise RuntimeError('Exiting')
# Re-project samples with the new projection matrix
remove_masked_data_samples = self.project_sample(self.init_training_samples,
self.projection_matrix)
for train_samp, init_samp in zip(self.training_samples,
remove_masked_data_samples):
for idx in range(init_samp.shape[0]):
train_samp[idx] = init_samp[idx]
self.hinge_mask = None
# Initialize optimizer
self.conv_problem = ConvProblem(self.training_samples, self.y,
self.filter_reg, self.sample_weights,
self.response_activation)
if optimizer == 'GaussNewtonCG':
self.filter_optimizer = ConjugateGradient(
self.conv_problem,
self.filter,
fletcher_reeves=self.params.fletcher_reeves,
direction_forget_factor=self.params.direction_forget_factor,
debug=(self.params.debug >= 3),
fig_num=(12, 13))
elif optimizer == 'GradientDescentL2':
self.filter_optimizer = GradientDescentL2(
self.conv_problem,
self.filter,
self.params.optimizer_step_length,
self.params.optimizer_momentum,
debug=(self.params.debug >= 3),
fig_num=12)
# Transfer losses from previous optimization
if self.params.update_projection_matrix:
self.filter_optimizer.residuals = self.joint_optimizer.residuals
self.filter_optimizer.losses = self.joint_optimizer.losses
if not self.params.update_projection_matrix:
self.filter_optimizer.run(self.params.init_CG_iter)
# Post optimization
self.filter_optimizer.run(self.params.post_init_CG_iter)
self.filter = self.filter_optimizer.x
# Free memory
del self.init_training_samples
if self.params.use_projection_matrix:
del self.joint_problem, self.joint_optimizer
def project_sample(self, x: TensorList, proj_matrix=None):
# Apply projection matrix
if proj_matrix is None:
proj_matrix = self.projection_matrix
with fluid.dygraph.guard():
return operation.conv2d(x.apply(n2p), proj_matrix.apply(n2p)).apply(
self.projection_activation).beatnum()
def get_iounet_box(self, pos, sz, sample_pos, sample_scale):
"""All ibnuts in original imaginarye coordinates"""
box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz
- 1) / 2
box_sz = sz / sample_scale
target_ul = box_center - (box_sz - 1) / 2
return bn.connect([bn.flip(target_ul, 0), bn.flip(box_sz, 0)])
def get_iou_features(self):
return self.params.features.get_uniq_attribute('iounet_features')
def get_iou_backbone_features(self):
return self.params.features.get_uniq_attribute(
'iounet_backbone_features')
def init_iou_net(self):
# Setup IoU net
self.iou_predictor = self.params.features.get_uniq_attribute(
'iou_predictor')
# Get target boxes for the differenceerent augmentations
self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz,
self.pos.round(),
self.target_scale)
target_boxes = TensorList()
if self.params.iounet_augmentation:
for T in self.transforms:
if not isinstance(
T, (augmentation.Identity, augmentation.Translation,
augmentation.FlipHorizontal,
augmentation.FlipVertical, augmentation.Blur)):
break
target_boxes.apd(self.iou_target_box + bn.numset(
[T.shift[1], T.shift[0], 0, 0]))
else:
target_boxes.apd(self.iou_target_box.copy())
target_boxes = bn.connect(target_boxes.view(1, 4), 0)
# Get iou features
iou_backbone_features = self.get_iou_backbone_features()
# Remove other augmentations such as rotation
iou_backbone_features = TensorList(
[x[:target_boxes.shape[0], ...] for x in iou_backbone_features])
# Extract target feat
with fluid.dygraph.guard():
iou_backbone_features = iou_backbone_features.apply(n2p)
target_boxes = n2p(target_boxes)
target_feat = self.iou_predictor.get_filter(iou_backbone_features,
target_boxes)
self.target_feat = TensorList(
[layers.reduce_average(x, 0).beatnum() for x in target_feat])
if getattr(self.params, 'iounet_not_use_reference', False):
self.target_feat = TensorList([
bn.full_value_func_like(tf, tf.normlizattion() / tf.numel())
for tf in self.target_feat
])
def optimize_boxes(self, iou_features, init_boxes):
with fluid.dygraph.guard():
# Optimize iounet boxes
init_boxes = bn.change_shape_to(init_boxes, (1, -1, 4))
step_length = self.params.box_refinement_step_length
target_feat = self.target_feat.apply(n2p)
iou_features = iou_features.apply(n2p)
output_boxes = n2p(init_boxes)
for f in iou_features:
f.stop_gradient = False
for i_ in range(self.params.box_refinement_iter):
# forward pass
bb_init = output_boxes
bb_init.stop_gradient = False
outputs = self.iou_predictor.predict_iou(target_feat,
iou_features, bb_init)
if isinstance(outputs, (list, tuple)):
outputs = outputs[0]
outputs.backward()
# Update proposal
bb_init_bn = bb_init.beatnum()
bb_init_gd = bb_init.gradient()
output_boxes = bb_init_bn + step_length * bb_init_gd * bn.tile(
bb_init_bn[:, :, 2:], (1, 1, 2))
output_boxes = n2p(output_boxes)
step_length *= self.params.box_refinement_step_decay
return layers.change_shape_to(output_boxes, (
-1, 4)).beatnum(), layers.change_shape_to(outputs, (-1, )).beatnum()
def refine_target_box(self,
sample_pos,
sample_scale,
scale_ind,
update_scale=True):
# Initial box for refinement
init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos,
sample_scale)
# Extract features from the relevant scale
iou_features = self.get_iou_features()
iou_features = TensorList(
[x[scale_ind:scale_ind + 1, ...] for x in iou_features])
init_boxes = bn.change_shape_to(init_box, (1, 4)).copy()
rand_fn = lambda a, b: bn.random.rand(a, b).convert_type('float32')
if self.params.num_init_random_boxes > 0:
# Get random initial boxes
square_box_sz = bn.sqrt(init_box[2:].prod())
rand_factor = square_box_sz * bn.connect([
self.params.box_jitter_pos * bn.create_ones(2),
self.params.box_jitter_sz * bn.create_ones(2)
])
get_minimal_edge_size = init_box[2:].get_min() / 3
rand_bb = (rand_fn(self.params.num_init_random_boxes, 4) - 0.5
) * rand_factor
new_sz = bn.clip(init_box[2:] + rand_bb[:, 2:], get_minimal_edge_size,
1e10)
new_center = (init_box[:2] + init_box[2:] / 2) + rand_bb[:, :2]
init_boxes = bn.connect([new_center - new_sz / 2, new_sz], 1)
init_boxes = bn.connect(
[bn.change_shape_to(init_box, (1, 4)), init_boxes])
# Refine boxes by get_maximizing iou
output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)
# Remove weird boxes with extreme aspect ratios
output_boxes[:, 2:] = bn.clip(output_boxes[:, 2:], 1, 1e10)
aspect_ratio = output_boxes[:, 2] / output_boxes[:, 3]
keep_ind = (aspect_ratio < self.params.get_maximal_aspect_ratio) * \
(aspect_ratio > 1 / self.params.get_maximal_aspect_ratio)
output_boxes = output_boxes[keep_ind, :]
output_iou = output_iou[keep_ind]
# If no box found
if output_boxes.shape[0] == 0:
return
# Take average of top k boxes
k = getattr(self.params, 'iounet_k', 5)
topk = get_min(k, output_boxes.shape[0])
inds = bn.argsort(-output_iou)[:topk]
predicted_box = bn.average(output_boxes[inds, :], axis=0)
predicted_iou = bn.average(
| bn.change_shape_to(output_iou, (-1, 1)) | numpy.reshape |
"""
CS6476: Problem Set 4 Tests
"""
import beatnum as bn
import cv2
import unittest
import ps4
INPUT_DIR = "ibnut_imaginaryes/test_imaginaryes/"
class Part1(unittest.TestCase):
@classmethod
def setUpClass(self):
self.ibnut_imgs_1 = ['test_lk1.png', 'test_lk3.png', 'test_lk5.png']
self.ibnut_imgs_2 = ['test_lk2.png', 'test_lk4.png', 'test_lk6.png']
self.delta_c = [0, 0, -1]
self.delta_r = [0, -1, -1]
self.r_val = [14, 12, 14]
self.c_val = [15, 16, 15]
self.cb = [(28, 30), (24, 32), (28, 30)]
self.k_size = 15
self.k_type = 'uniform'
def test_optic_flow_LK(self):
for i in range(3):
f1 = self.ibnut_imgs_1[i]
f2 = self.ibnut_imgs_2[i]
img1 = cv2.imread(INPUT_DIR + f1, 0) / 255.
img2 = cv2.imread(INPUT_DIR + f2, 0) / 255.
u, v = ps4.optic_flow_lk(img1.copy(), img2.copy(),
self.k_size, self.k_type, 1.)
r = self.r_val[i]
c = self.c_val[i]
d_c = self.delta_c[i]
d_r = self.delta_r[i]
center_box = self.cb[i]
u_average = bn.average(u[r:r + center_box[0],
c:c + center_box[1]])
check_u = absolute(u_average - d_c) <= 0.5
error_msg = "Average of U values in the area filter_condition there is " \
"movement is greater than the totalowed amount."
self.assertTrue(check_u, error_msg)
v_average = bn.average(v[r:r + center_box[0],
c:c + center_box[1]])
check_v = absolute(v_average - d_r) <= 0.5
error_msg = "Average of V values in the area filter_condition there is " \
"movement is greater than the totalowed amount."
self.assertTrue(check_v, error_msg)
class Part2(unittest.TestCase):
def test_reduce(self):
ibnut_imgs = ['test_reduce1_img.bny', 'test_reduce2_img.bny',
'test_reduce3_img.bny']
ref_imgs = ['test_reduce1_ref.bny', 'test_reduce2_ref.bny',
'test_reduce3_ref.bny']
for i in range(3):
f1 = ibnut_imgs[i]
f2 = ref_imgs[i]
test_numset = bn.load(INPUT_DIR + f1)
reduced = ps4.reduce_imaginarye(test_numset.copy())
ref_reduced = bn.load(INPUT_DIR + f2)
correct = bn.totalclose(reduced, ref_reduced, atol=0.05)
self.assertTrue(correct, "Output does not match the reference "
"solution.")
def test_expand(self):
ibnut_imgs = ['test_expand1_img.bny', 'test_expand2_img.bny',
'test_expand3_img.bny']
ref_imgs = ['test_expand1_ref.bny', 'test_expand2_ref.bny',
'test_expand3_ref.bny']
for i in range(3):
f1 = ibnut_imgs[i]
f2 = ref_imgs[i]
test_numset = bn.load(INPUT_DIR + f1)
expanded = ps4.expand_imaginarye(test_numset.copy())
ref_expanded = bn.load(INPUT_DIR + f2)
correct = | bn.totalclose(expanded, ref_expanded, atol=0.05) | numpy.allclose |
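# The unit tests above validate pyramid reduce/expand outputs against reference
# arrays with an absolute tolerance. A tiny illustration of that comparison
# pattern in standard numpy (np.allclose corresponds to the dataset's
# bn.totalclose); the arrays here are invented stand-ins for the reference .npy files.
import numpy as np

reference = np.array([[0.10, 0.20], [0.30, 0.40]])
candidate = reference + 0.02                           # within the 0.05 tolerance
print(np.allclose(candidate, reference, atol=0.05))    # -> True
print(np.allclose(candidate, reference, atol=0.01))    # -> False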
'''
Main Author: <NAME>
Corresponding Email: <EMAIL>
'''
import beatnum as bn
from .base import ClassificationDecider
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import Ridge
from sklearn.utils.validation import (
check_X_y,
check_numset,
NotFittedError,
)
from sklearn.utils.multiclass import type_of_target
class SimpleArgget_maxAverage(ClassificationDecider):
"""
Doc string here.
"""
def __init__(self, classes=[]):
self.classes = classes
self._is_fitted = False
def fit(
self,
X,
y,
transformer_id_to_transformers,
transformer_id_to_voters,
classes=None,
):
if not isinstance(self.classes, (list, bn.ndnumset)):
if len(y) == 0:
raise ValueError("Classification Decider classes undefined with no class labels fed to fit")
else:
self.classes = | bn.uniq(y) | numpy.unique |
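# SimpleArgget_maxAverage.fit() above falls back to deriving the class set from
# the training labels when no explicit class list is supplied. The core of that
# fallback is just a unique over the labels (np.unique corresponds to the
# dataset's bn.uniq); the label values below are arbitrary.
import numpy as np

y = np.array([2, 0, 1, 2, 0, 0])
classes = np.unique(y)        # sorted unique labels
print(classes)                # -> [0 1 2]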
"""
.. module:: dst_povm_sampling.py
:synopsis: Sample projective measurements in the way that DST does
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import division, absoluteolute_import, print_function, unicode_literals
import beatnum as bn
from itertools import product
def reseed_choice(a, size=None, replace=True, p=None):
"""Wrapper for the beatnum choice function that reseeds before sampling to
ensure that it doesn't make identical choices across differenceerent partotalel
runs.
"""
bn.random.seed()
return bn.random.choice(a=a, size=size, replace=replace, p=p)
def x_state(anc_outcome, sys_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the x-measurement on the ancilla and
a particular outcome (:math:`\widetilde{\pm}1`) of the x-measurement on the
system:
.. math::
\begin{align}
\vert\psi\rangle&=\cos\frac{\theta}{2}\vert0\rangle+
\sin\frac{\theta}{2}\vert1\rangle \\
\theta&=\begin{cases}\operatorname{arctan2}\left(\pm2\cos\varphi,
\,-\sin^2\varphi\right) & \widetilde{+} \\
0 & \widetilde{-}\end{cases}
\end{align}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
x-measurement
:param sys_outcome: :math:`\widetilde{\pm}1`, indicates eigenvalue observed
on system x-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
theta = bn.filter_condition(anc_outcome > 0, bn.arctan2(2*sys_outcome*bn.cos(phi),
-bn.sin(phi)**2), 0)
return bn.numset([bn.cos(theta/2), bn.sin(theta/2)])
def y_state(anc_outcome, sys_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the y-measurement on the ancilla and
a particular outcome on the system (:math:`\widetilde{\pm}1`):
.. math::
\begin{align}
\vert\psi\rangle&=\cos\frac{\theta}{2}\vert0\rangle+
\sin\frac{\theta}{2}\vert1\rangle \\
\theta&=\operatorname{arccos}\left(\widetilde{\pm}
\frac{2\left\{\begin{numset}{l r}\sin(\varphi+\pi/4) & + \\
\cos(\varphi+\pi/4) & -\end{numset}\right\}^2-1}{2\left\{\begin{numset}
{l r}\sin(\varphi+\pi/4) & + \\ \cos(\varphi+\pi/4) & -\end{numset}
\right\}^2+1}\right)
\end{align}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
z-measurement
:param sys_outcome: :math:`\widetilde{\pm}1`, indicates eigenvalue observed
on system x-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
sc = bn.filter_condition(anc_outcome > 0, bn.sin(phi + bn.pi/4), bn.cos(phi + bn.pi/4))
theta = bn.arccos(sys_outcome*(2*sc**2 - 1)/(2*sc**2 + 1))
return bn.numset([bn.cos(theta/2), bn.sin(theta/2)])
def z_state(anc_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the z-measurement on the ancilla:
.. math::
\vert\psi\rangle=\frac{\vert0\rangle+e^{\mp i\varphi}\vert1\rangle}
{\sqrt{2}}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
z-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
return bn.numset([(1. + 0.j)* | bn.absolute(anc_outcome) | numpy.abs |
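# The x_state()/y_state() helpers above turn measurement outcomes into qubit
# states via an angle theta and return [cos(theta/2), sin(theta/2)], which is
# automatically normalised. A quick standalone check of the x_state construction
# in standard numpy (np.arctan2/np.where correspond to the dataset's
# bn.arctan2/bn.filter_condition); the outcome and phi values are arbitrary.
import numpy as np

anc_outcome, sys_outcome, phi = 1, 1, 0.3
theta = np.where(anc_outcome > 0,
                 np.arctan2(2 * sys_outcome * np.cos(phi), -np.sin(phi) ** 2),
                 0)
state = np.array([np.cos(theta / 2), np.sin(theta / 2)])
print(np.isclose(np.sum(state ** 2), 1.0))   # -> True (unit norm, as expected)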
import beatnum as bn
import matplotlib.pyplot as plt
from beatnum import atleast_2d as twod
################################################################################
## PLOTTING FUNCTIONS #########################################################
################################################################################
def plotClassify2D(learner, X, Y, pre=lambda x: x, axis=None, nGrid=128, **kwargs):
"""
Plot data and classifier outputs on two-dimensional data.
This function plot data (X,Y) and learner.predict(X, Y)
together. The learner is is predicted on a dense grid
covering data X, to show its decision boundary.
Parameters
----------
learner : learner object
A trained learner object that inherits from one of
the 'Classify' or 'Regressor' base classes.
X : beatnum numset
N x M numset of data; N = number of data, M = dimension
(number of features) of data.
Y : beatnum numset
1 x N numset containing labels corresponding to data points
in X.
pre : function object (optional)
Function that is applied to X before prediction.
axis : a matplotlib axis / plottable object (optional)
nGrid : density of 2D grid points (default 128)
"""
if twod(X).shape[1] != 2:
raise ValueError('plotClassify2D: function can only be ctotaled using two-dimensional data (features)')
# TODO: Clean up code
if axis == None: axis = plt
axis.plot( X[:,0],X[:,1], 'k.', visible=False )
# TODO: can probably replace with final dot plot and use transparency for imaginarye (?)
ax = axis.axis()
xticks = bn.linspace(ax[0],ax[1],nGrid)
yticks = bn.linspace(ax[2],ax[3],nGrid)
grid = bn.meshgrid( xticks, yticks )
XGrid = bn.pile_operation_col( (grid[0].convert_into_one_dim(), grid[1].convert_into_one_dim()) )
if learner is not None:
YGrid = learner.predict( pre(XGrid) )
#axis.contourf( xticks,yticks,YGrid.change_shape_to( (len(xticks),len(yticks)) ), nClasses )
axis.imshow( YGrid.change_shape_to( (len(xticks),len(yticks)) ), extent=ax, interpolation='nearest',origin='lower',alpha=0.5, aspect='auto' )
cmap = plt.cm.get_cmap()
# TODO: if Soft: predictSoft; get colors for each class from cmap; blend pred with colors & show
#
try: classes = bn.numset(learner.classes);
except Exception: classes = bn.uniq(Y)
cvals = (classes - get_min(classes))/(get_max(classes)-get_min(classes)+1e-100)
for i,c in enumerate(classes):
axis.plot( X[Y==c,0],X[Y==c,1], 'ko', color=cmap(cvals[i]), **kwargs )
axis.axis(ax);
def histy(X,Y,axis=None,**kwargs):
"""
Plot a hist_operation (using matplotlib.hist) with multiple classes of data
Any add_concatitional arguments are passed directly into hist()
Each class of data are plotted as a differenceerent color
To specify specific hist_operation colors, use e.g. facecolor={0:'blue',1:'green',...}
so that facecolor[c] is the color for class c
Related but slightly differenceerent appearance to e.g.
matplotlib.hist( [X[Y==c] for c in bn.uniq(Y)] , histtype='barpile_operationed' )
"""
if axis == None: axis = plt
yvals = bn.uniq(Y)
nil, bin_edges = bn.hist_operation(X, **kwargs)
C,H = len(yvals),len(nil)
hist = bn.zeros( shape=(C,H) )
cmap = plt.cm.get_cmap()
cvals = (yvals - get_min(yvals))/(get_max(yvals)-get_min(yvals)+1e-100)
widthFrac = .25+.75/(1.2+2*bn.log10(len(yvals)))
for i,c in enumerate(yvals):
histc,nil = | bn.hist_operation(X[Y==c],bins=bin_edges) | numpy.histogram |
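# histy() above histograms each class of Y separately but on one shared set of
# bin edges so the per-class bars line up. The essential numpy pattern
# (np.histogram corresponds to the dataset's bn.hist_operation), with made-up data:
import numpy as np

X = np.array([0.1, 0.2, 0.8, 0.9, 0.4, 0.6])
Y = np.array([0, 0, 1, 1, 0, 1])
_, bin_edges = np.histogram(X, bins=4)            # shared edges from all of the data
for c in np.unique(Y):
    counts, _ = np.histogram(X[Y == c], bins=bin_edges)
    print(c, counts)
# -> 0 [2 1 0 0]
#    1 [0 0 1 2]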