repo_name | path | copies | size | content | license
---|---|---|---|---|---
microhh/microhh | kernel_tuner/statistics.py | 5 | 1185 | import matplotlib.pyplot as pl
import numpy as np
import json
import glob
pl.close('all')
pl.ion()
def get_timings(kernel_name, gridsizes):
dt = np.zeros_like(gridsizes, dtype=float)
for i,gridsize in enumerate(gridsizes):
with open( '{0}_{1:03d}.json'.format(kernel_name, gridsize) ) as f:
data = json.load(f)
timings = data[0]
fastest = 1e9
for timing in timings:
fastest = min(fastest, timing['time'])
dt[i] = fastest
return dt
if __name__ == '__main__':
gridsize = np.arange(32, 513, 32)
normal = get_timings('diff_c_g', gridsize)
smem = get_timings('diff_c_g_smem', gridsize)
fac = gridsize**3
pl.figure(figsize=(8,4))
pl.subplot(121)
pl.plot(gridsize, normal/fac, 'k-x', label='non smem')
pl.plot(gridsize, smem /fac, 'r-x', label='smem')
pl.ylim(0, 2e-7)
pl.ylabel('time/gridpoint (s)')
pl.xlabel('gridpoints (-)')
pl.legend()
pl.grid()
pl.subplot(122)
pl.plot(gridsize, normal/smem, 'k-x')
pl.ylabel('non_smem/smem (-)')
pl.xlabel('gridpoints (-)')
pl.grid()
pl.tight_layout()
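# Illustrative sketch: get_timings() above expects kernel_tuner JSON output laid
# out as data[0] == [{'time': ...}, ...], one dict per tested configuration. The
# helper below writes a synthetic file in that layout so the script can be tried
# without real tuning results (file names and timing values here are made up).
def write_synthetic_timings(kernel_name, gridsize, times):
    fname = '{0}_{1:03d}.json'.format(kernel_name, gridsize)
    with open(fname, 'w') as f:
        json.dump([[{'time': t} for t in times]], f)
# e.g. write_synthetic_timings('diff_c_g', 32, [0.8, 0.5, 0.9]) makes
# get_timings('diff_c_g', np.array([32])) return array([0.5]).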
| gpl-3.0 |
altairpearl/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of metric and non-metric MDS on generated noisy data.
The reconstructed points from metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
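# Illustrative sketch: a compact numerical comparison of the two embeddings used
# above. stress_ is assumed to hold the final value of the objective minimised
# by SMACOF; values depend on the random noise added to the similarities.
def report_stress(dissimilarities, random_state=42):
    metric_mds = manifold.MDS(n_components=2, dissimilarity="precomputed",
                              random_state=random_state)
    metric_mds.fit(dissimilarities)
    nonmetric_mds = manifold.MDS(n_components=2, metric=False, n_init=1,
                                 dissimilarity="precomputed",
                                 random_state=random_state)
    nonmetric_mds.fit(dissimilarities)
    print("metric stress: %.3f, non-metric stress: %.3f"
          % (metric_mds.stress_, nonmetric_mds.stress_))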
| bsd-3-clause |
timpalpant/KaggleTSTextClassification | scripts/plot_feature_label_correlations.py | 1 | 1976 | #!/usr/bin/env python
'''
Compute mutual information between individual features
and labels
'''
import argparse
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from common import *
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('features', type=load_npz,
help='Training data features (npz)')
parser.add_argument('labels', type=load_npz,
help='Training data labels (npz)')
parser.add_argument('output',
help='Output file with plots (pdf)')
return parser
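# Illustrative sketch (not called by the plotting code below): the mutual
# information mentioned in the module docstring can be estimated for one float
# feature f and one boolean label l by binning f and comparing the joint and
# marginal histograms. numpy is assumed to come in via `from common import *`;
# it is imported locally here so the helper stands on its own.
def mutual_information(f, l, bins=25):
    import numpy as np
    joint, _, _ = np.histogram2d(f, l.astype(int), bins=(bins, 2))
    pxy = joint / joint.sum()
    px = pxy.sum(axis=1, keepdims=True)
    py = pxy.sum(axis=0, keepdims=True)
    nz = pxy > 0
    return float(np.sum(pxy[nz] * np.log(pxy[nz] / np.dot(px, py)[nz])))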
if __name__ == "__main__":
args = opts().parse_args()
print "Loading labels"
labels = args.labels['labels']
header = args.labels['header']
pdf = PdfPages(args.output)
#print "Plotting boolean features conditioned on labels"
#bf = args.features['bfeatures']
#n = bf.shape[1]
#m = np.zeros((n,11))
#m[:,0] = np.sum(bf==-1, axis=0)
#m[:,1] = np.sum(bf==0, axis=0)
#m[:,2] = np.sum(bf==1, axis=0)
#fig = plt.figure()
#pdf.savefig(fig)
#plt.close()
print "Plotting float features conditioned on labels"
ff = args.features['ffeatures']
n = ff.shape[1]
x = np.arange(n)
for i, l in enumerate(labels.T):
print "label %d" % i
for j, f in enumerate(ff.T):
print "...ffeature %d" % j
fig = plt.figure()
plt.hist(f[l], normed=True, label='P(f | l)',
color='blue', alpha=0.4,
range=(f.min(),f.max()), bins=25)
plt.hist(f[np.logical_not(l)], normed=True, label='P(f | ~l)',
color='green', alpha=0.4,
range=(f.min(),f.max()), bins=25)
plt.xlim(f.min(), f.max())
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('FFeature %d, Label %s' % (j, header[i]))
pdf.savefig(fig)
plt.close()
pdf.close() | gpl-3.0 |
harlowja/networkx | examples/algorithms/blockmodel.py | 32 | 3009 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
# This partition selection is arbitrary, for illustrative purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
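# Illustrative sketch: the same pipeline on a small built-in graph, so the
# clustering threshold in create_hc can be explored without the Hartford
# edgelist file. nx.blockmodel is the networkx 1.x name for this operation
# (networkx 2.x renamed it quotient_graph).
def demo_blockmodel():
    G = nx.karate_club_graph()
    partitions = create_hc(G)
    BM = nx.blockmodel(G, partitions)
    print("%d nodes collapsed into %d blocks" % (len(G), len(BM)))
    return BM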
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H=nx.connected_component_subgraphs(G)[0]
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create partitions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| bsd-3-clause |
MannyGrewal/Manny.CIFAR | Manny.CIFAR/CIFAR/CIFARPlotter.py | 1 | 1321 | import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pylab
########################################################################
# 2017 - Manny Grewal
# Purpose of this module is to visualise a list of images from the CIFAR dataset
# How many columns to show in a grid
MAX_COLS = 5
#PlotImages takes a list of images and their respective labels as the second parameter
#Then it renders them using matplotlib imshow method in a 5 column matrix
def PlotImages(arrayImages,arrayClassLabels,reShapeRequired=False):
totalImages=len(arrayImages)
if(reShapeRequired==True):
arrayImages = np.reshape(arrayImages, (totalImages,32,32,3))
totalRows= math.ceil(totalImages/MAX_COLS)
fig = plt.figure(figsize=(5,5))
gs = gridspec.GridSpec(totalImages, MAX_COLS)
# set the space between subplots and the position of the subplots in the figure
gs.update(wspace=0.1, hspace=0.4, left = 0.1, right = 0.7, bottom = 0.1, top = 0.9)
arrayIndex=0
for g in gs:
if(arrayIndex<totalImages):
axes=plt.subplot(g)
axes.set_axis_off()
axes.set_title(arrayClassLabels[arrayIndex])
axes.imshow(arrayImages[arrayIndex])
arrayIndex+=1
#plt.show() | mit |
terkkila/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data?
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
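# Illustrative sketch (not part of the scikit-learn API): the quantity maximised
# above, corr(X u, Y v), can be read directly off the scores returned by
# transform(). With the toy X, Y from the docstring example this returns the
# first canonical correlation.
def _first_canonical_correlation(X, Y):
    import numpy as np
    x_scores, y_scores = CCA(n_components=1).fit(X, Y).transform(X, Y)
    return float(np.corrcoef(x_scores[:, 0], y_scores[:, 0])[0, 1])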
| bsd-3-clause |
mailhexu/pyDFTutils | pyDFTutils/vasp/procar_reader.py | 2 | 3569 | #!/usr/bin/env python
from numpy import zeros,inner
import numpy as np
import re
from pyDFTutils.ase_utils import symbol_number
import matplotlib.pyplot as plt
def fix_line(line):
line=re.sub("(\d)-(\d)", r'\1 -\2',line)
return line
class procar_reader():
def __init__(self,fname='PROCAR'):
self.read(fname=fname)
def get_dos(self,iion,orb_name,iband):
dos=inner(self.dos_array[iion,self.orb_dict[orb_name],iband,:],self.weight)
return dos
def filter_band(self,iion,orb_name,thr=0.01):
"""
return a 2D energy array: energy(iband, nkpt).
"""
band_ids=[]
for iband in range(self.nbands):
d=self.get_dos(iion,orb_name,iband)
print(d)
if d>thr:
band_ids.append(iband)
return self.energy[np.array(band_ids,dtype=int)]
def plot_band(self,iion,orb_name,thr=0.01):
earray=self.filter_band(iion,orb_name,thr=thr)
for k_e in earray:
plt.plot(k_e)
plt.ylim(-3,2)
#plt.show()
def plot_band_alpha(self,iion,orb_name,color='k'):
for iband in range(self.nbands):
d=self.get_dos(iion,orb_name,iband)
print(d)
plt.plot(self.energy[iband],color,linewidth=d*50,alpha=0.5)
def read(self,fname='PROCAR'):
lines=open(fname).readlines()
iline=0
self.has_phase=bool(lines[iline].rfind('phase'))
iline=1
p=lines[iline].split()
self.nkpts=int(p[3])
self.nbands=int(p[7])
self.nions=int(p[11])
self.dos_label=lines[7].split()[1:-1]
self.norb=len(self.dos_label)
self.orb_dict=dict(list(zip(self.dos_label,list(range(self.norb)))))
print(self.orb_dict)
self.dos_array=zeros((self.nions,self.norb,self.nbands,self.nkpts),dtype='float')
self.weight=zeros(self.nkpts,dtype='float')
self.energy=zeros((self.nbands,self.nkpts))
self.band_occ=zeros((self.nbands,self.nkpts))
self.kpts=zeros((self.nkpts,3))
iline+=1
for ikpts in range(self.nkpts):
iline+=1
line_k=fix_line( lines[iline]).split()
self.kpts[ikpts]=[float(x) for x in line_k[3:6]]
self.weight[ikpts]=float(line_k[-1])
iline+=2
for iband in range(self.nbands):
line_b=lines[iline].split()
self.energy[iband,ikpts]=float(line_b[4])
self.band_occ[iband,ikpts]=float(line_b[7])
iline+=3
for iion in range(self.nions):
#print iline
line_dos=lines[iline].strip().split()
#print iline
#print line_dos
self.dos_array[iion,:,iband,ikpts]=[float(x) for x in line_dos[1:-1]]
iline+=1
#if self.has_phase:
#iline+=1+self.nions*2
iline+=3
self.efermi=np.max(self.energy[self.band_occ>0.5])
print(self.efermi)
self.energy=self.energy-self.efermi
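# Illustrative sketch of the contraction used in get_dos() above: the orbital
# character of one band is averaged over k-points using the k-point weights read
# from PROCAR. The numbers below are made up for illustration only.
def _weighted_character_example():
    char_per_kpt = np.array([0.40, 0.55, 0.20])  # projection for one ion/orbital/band at each k-point
    kpt_weight = np.array([0.25, 0.50, 0.25])    # normalised k-point weights
    return inner(char_per_kpt, kpt_weight)       # same inner product as get_dos()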
def test(iion=0,orb_name='dx2',thr=0.005):
p=procar_reader()
#for e in p.filter_band(0,orb_name,thr=thr):
# plt.plot(e,'.',color='green')
#for e in p.filter_band(1,'dx2',thr=thr):
# plt.plot(e,'-',color='red')
#plt.plot(p.filter_band(iion,'dz2',thr=thr))
p.plot_band_alpha(1,'dx2',color='r')
p.plot_band_alpha(1,'dz2',color='g')
plt.ylim(-5,5)
plt.show()
if __name__=='__main__':
test()
| lgpl-3.0 |
alalbiol/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
import numpy as np
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
shiinoandra/wavegano | Program/Wavegano/Wavegano/Wavegano.py | 1 | 21939 | import operation as op
import random
import math
import Helper
import GRDEI
import RDE
import GDE
import Wave
import os
import numpy
import matplotlib.pyplot as plt
numpy.set_printoptions(threshold=numpy.nan)
#def encode(payload_path,cover_path,threshold,segment_size,partition_segment_size,method):
# file_name = cover_path.split("\\")[len(cover_path.split("\\"))-1]
# path = cover_path.replace(file_name,'')
# print(" PROSES ENCODING ")
# print(" METODE YANG DIGUNAKAN : " + method)
# payload = Helper.payloadIO.open(payload_path)
# bin_payload = op.operation.stringToBinary(payload)
# print "besar payload : "
# payload_size = len(bin_payload)
# print(payload_size)
# medium = Helper.WavIO.open(cover_path)
# print "bitrate: "
# print(medium.bitrate)
# samples = op.operation.numToBinary(medium.samples)
# (M1,M2,Partisi) = op.operation.intel_partition(samples,partition_segment_size)
# intM1 = op.operation.binaryTonum(M1)
# intM2 = op.operation.binaryTonum(M2)
# if method == "GDE" :
# kapasitas_M1 = GDE.checkCapacity(intM1,segment_size,threshold)
# kapasitas_M2 = GDE.checkCapacity(intM2,segment_size,threshold)
# elif method == "GRDEI":
# kapasitas_M1 = GRDEI.checkCapacity(intM1,segment_size,threshold)
# kapasitas_M2 = GRDEI.checkCapacity(intM2,segment_size,threshold)
# print(" kapasitas segmen 1 : " + str(kapasitas_M1))
# print(" kapasitas segmen 2 : " + str(kapasitas_M2))
# capacity = kapasitas_M1+kapasitas_M2
# print "Kapasitas penyimpanan :"
# print(capacity)
# if capacity >= payload_size:
# print "stegano dapat dilakukan"
# payload_seg1 = bin_payload[:kapasitas_M1]
# payload_seg2 = bin_payload[kapasitas_M1:len(bin_payload)]
# if method == "GDE" :
# (encoded_1,locMap_1) = GDE.encode(intM1,payload_seg1,segment_size,threshold)
# (encoded_2,locMap_2) = GDE.encode(intM2,payload_seg2,segment_size,threshold)
# elif method == "GRDEI":
# (encoded_1,locMap_1,reduceMap_1) = GRDEI.encode(intM1,payload_seg1,segment_size,threshold)
# (encoded_2,locMap_2,reduceMap_2) = GRDEI.encode(intM2,payload_seg2,segment_size,threshold)
# encoded_1 = op.operation.numToBinary(encoded_1)
# encoded_2 = op.operation.numToBinary(encoded_2)
# for i in range(len(encoded_1)):
# encoded_1[i]=encoded_1[i][8:16]
# for i in range(len(encoded_2)):
# encoded_2[i]=encoded_2[i][8:16]
# encoded_1_bin = numpy.array(encoded_1,dtype=int)
# encoded_2_bin = numpy.array(encoded_2,dtype=int)
# _M = op.operation.reconstructPartition(encoded_1_bin,encoded_2_bin,Partisi)
# _M_int2= numpy.asarray(op.operation.binaryTonum(_M),dtype=numpy.uint16)
# _M_int = numpy.asarray(op.operation.binaryTonum(_M),dtype=numpy.int16)
# new_wav = Wave.Wave(method+"_encoded_"+str(file_name))
# new_wav.samples = _M_int
# new_wav.bitrate = medium.bitrate
# time_axis = numpy.linspace(0,len(medium.samples)/medium.bitrate,num=len(medium.samples))
# plt.subplot(3,1,1)
# plt.title("perbandingan wav asli dan hasil encode")
# plt.plot(time_axis,numpy.array(medium.samples,dtype= "int16"))
# plt.ylabel("WAV asli")
# plt.subplot(3,1,2)
# plt.plot(time_axis,numpy.array(_M_int,dtype= "int16"))
# plt.ylabel("Hasil Encode")
# plt.subplot(3,1,3)
# plt.plot(time_axis,numpy.array(_M_int,dtype= "int16"))
# plt.subplot(3,1,3)
# plt.plot(time_axis,numpy.array(medium.samples,dtype= "int16"))
# plt.ylabel("perbandingan")
# plt.xlabel("Waktu (s)")
# plt.savefig("original-encoded.png")
# #print(medium.samples)
# #raw_input()
# #print(new_wav.samples)
# #new_wav.print_info()
# #medium.print_info()
# Helper.WavIO.write(path,new_wav)
# if method == "GDE" :
# return(locMap_1,locMap_2,Partisi)
# elif method == "GRDEI":
# return(locMap_1,locMap_2,reduceMap_1,reduceMap_2,Partisi)
# else:
# print "kapasitas tidak mencukupi"
# return -1
def encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,Partisi,method):
if method == "GDE" :
(encoded_1,locMap_1) = GDE.encode(intM1,payload_seg1,segment_size,threshold)
(encoded_2,locMap_2) = GDE.encode(intM2,payload_seg2,segment_size,threshold)
elif method == "GRDEI":
(encoded_1,locMap_1,reduceMap_1) = GRDEI.encode(intM1,payload_seg1,segment_size,threshold)
(encoded_2,locMap_2,reduceMap_2) = GRDEI.encode(intM2,payload_seg2,segment_size,threshold)
encoded_1 = op.operation.numToBinary(encoded_1)
encoded_2 = op.operation.numToBinary(encoded_2)
for i in range(len(encoded_1)):
encoded_1[i]=encoded_1[i][8:16]
for i in range(len(encoded_2)):
encoded_2[i]=encoded_2[i][8:16]
encoded_1_bin = numpy.array(encoded_1,dtype=int)
encoded_2_bin = numpy.array(encoded_2,dtype=int)
_M = op.operation.reconstructPartition(encoded_1_bin,encoded_2_bin,Partisi)
_M_int2= numpy.asarray(op.operation.binaryTonum(_M),dtype=numpy.uint16)
if method == "GDE" :
return(_M_int2,locMap_1,locMap_2,Partisi)
elif method == "GRDEI":
return(_M_int2,locMap_1,locMap_2,reduceMap_1,reduceMap_2,Partisi)
def decode(file_path,segment_size,payload_size,method,Partition,locMap_1,locMap_2,reduceMap_1 = None , reduceMap_2 = None):
file_name = file_path.split("\\")[len(file_path.split("\\"))-1]
path = file_path.replace(file_name,'')
print(" PROSES DECODING ")
print(" METODE YANG DIGUNAKAN : " )
medium_encoded = Helper.WavIO.open(file_path)
print "bitrate: "
print(medium_encoded.bitrate)
samples_decode = op.operation.numToBinary(medium_encoded.samples)
(_M1,_M2,P) = op.operation.intel_partition(samples_decode,0,Partition)
_intM1 = op.operation.binaryTonum(_M1)
_intM2 = op.operation.binaryTonum(_M2)
if method == "GDE" :
(decoded_M1,message1) = GDE.decode(_intM1,segment_size,locMap_1)
(decoded_M2,message2) = GDE.decode(_intM2,segment_size,locMap_2)
elif method == "GRDEI":
(decoded_M1,message1) = GRDEI.decode(_intM1,segment_size,locMap_1,reduceMap_1)
(decoded_M2,message2) = GRDEI.decode(_intM2,segment_size,locMap_2,reduceMap_2)
message_decoded = []
message_decoded.extend(message1)
message_decoded.extend(message2)
message_decoded = message_decoded[:payload_size]
print(len(message_decoded))
message_write = op.operation.revStringToBinary(message_decoded)
Helper.payloadIO.write(path+"payload_decoded.txt",message_write)
decoded_1 = op.operation.numToBinary(decoded_M1)
decoded_2 = op.operation.numToBinary(decoded_M2)
for i in range(len(decoded_1)):
decoded_1[i]=decoded_1[i][8:16]
for i in range(len(decoded_2)):
decoded_2[i]=decoded_2[i][8:16]
decoded_1_bin = numpy.array(decoded_1,dtype=int)
decoded_2_bin = numpy.array(decoded_2,dtype=int)
M_awal = op.operation.reconstructPartition(decoded_1_bin,decoded_2_bin,Partition)
M__awal = numpy.asarray(op.operation.binaryTonum(M_awal),dtype=numpy.int16)
new_wav2 = Wave.Wave(file_name.replace("encoded","decoded"))
new_wav2.samples = M__awal
new_wav2.bitrate = medium_encoded.bitrate
plt.clf()
time_axis = numpy.linspace(0,len(medium_encoded.samples)/medium_encoded.bitrate,num=len(medium_encoded.samples))
plt.subplot(2,1,1)
plt.title("perbandingan hasil encode dan hasil decode")
plt.plot(time_axis,numpy.array(medium_encoded.samples,dtype= "int16"))
plt.ylabel("WAV encoded")
plt.subplot(2,1,2)
plt.plot(time_axis,numpy.array(M__awal,dtype= "int16"))
plt.ylabel("WAV Hasil Dencode")
plt.xlabel("Waktu (s)")
plt.savefig("encoded-decoded.png")
Helper.WavIO.write(path,new_wav2)
def multilayer_encode(payload_path,cover_path,threshold,segment_size,partition_segment_size,method,n_layer):
locmap_list = []
reducemap_list = []
capacities = []
payload_sizes = []
total_capacity = 0
file_name = cover_path.split("\\")[len(cover_path.split("\\"))-1]
path = cover_path.replace(file_name,'')
print(" PROSES ENCODING ")
print(" METODE YANG DIGUNAKAN : " + str(method))
payload = Helper.payloadIO.open(payload_path)
bin_payload = op.operation.stringToBinary(payload)
print "besar payload : "
payload_size = len(bin_payload)
print(payload_size)
medium = Helper.WavIO.open(cover_path)
print "bitrate: "
print(medium.bitrate)
audio_sample = medium.samples
#for i in range(n_layer):
# samples = op.operation.numToBinary(audio_sample)
# (M1,M2,Partisi) = op.operation.intel_partition(samples,partition_segment_size)
# intM1 = op.operation.binaryTonum(M1)
# intM2 = op.operation.binaryTonum(M2)
# if method == "GDE" :
# kapasitas_M1 = GDE.checkCapacity(intM1,segment_size,threshold)
# kapasitas_M2 = GDE.checkCapacity(intM2,segment_size,threshold)
# elif method == "GRDEI":
# kapasitas_M1 = GRDEI.checkCapacity(intM1,segment_size,threshold)
# kapasitas_M2 = GRDEI.checkCapacity(intM2,segment_size,threshold)
# print(" kapasitas segmen 1 : " + str(kapasitas_M1))
# print(" kapasitas segmen 2 : " + str(kapasitas_M2))
# capacity = kapasitas_M1+kapasitas_M2
# capacities.append((kapasitas_M1,kapasitas_M2))
# print "Kapasitas penyimpanan layer ke "+str(i)+":"+str(capacity)
# payload_seg1 = [1 for i in range(kapasitas_M1)]
# payload_seg2 = [1 for i in range(kapasitas_M2)]
# total_capacity+=capacity
# audio_sample = encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,Partisi,method)[0]
#print("total kapasitas "+str(total_capacity))
#if(total_capacity > payload_size):
# print("stegano dapat dilakukan")
#time_axis = numpy.linspace(0,len(medium.samples)/medium.bitrate,num=len(medium.samples))
#plt.subplot(n_layer+1,1,1)
#plt.title("perbandingan wav asli dan hasil encode multi-layer")
#plt.plot(time_axis,numpy.array(medium.samples,dtype= "int16"))
#plt.ylabel("WAV asli")
P = op.operation.intel_partition(op.operation.numToBinary(audio_sample),partition_segment_size)[2]
payload_counter = 0
for i in range(n_layer):
print("layer ke " + str(i))
samples = op.operation.numToBinary(audio_sample)
(M1,M2,Partisi) = op.operation.intel_partition(samples,partition_segment_size,P)
intM1 = op.operation.binaryTonum(M1)
intM2 = op.operation.binaryTonum(M2)
if method == "GDE" :
kapasitas_M1 = GDE.checkCapacity(intM1,segment_size,threshold)
kapasitas_M2 = GDE.checkCapacity(intM2,segment_size,threshold)
elif method == "GRDEI":
kapasitas_M1 = GRDEI.checkCapacity(intM1,segment_size,threshold)
kapasitas_M2 = GRDEI.checkCapacity(intM2,segment_size,threshold)
capacities.append((kapasitas_M1,kapasitas_M2))
kapasitas_total_layer = kapasitas_M1+kapasitas_M2
total_capacity += kapasitas_total_layer
if((payload_size - payload_counter) > kapasitas_total_layer):
payload_i = bin_payload[payload_counter:kapasitas_total_layer]
payload_sizes.append(kapasitas_total_layer)
payload_counter+=kapasitas_total_layer
print(len(payload_i))
else:
payload_i = bin_payload[payload_counter:payload_size]
payload_sizes.append((payload_size-payload_counter))
payload_counter+=(payload_size-payload_counter)
print(len(payload_i))
payload_seg1 = payload_i[:kapasitas_M1]
print(len(payload_seg1))
payload_seg2 = payload_i[kapasitas_M1:len(payload_i)]
print(len(payload_seg2))
if method == "GDE" :
(audio_sample,locMap_1,locMap_2,Partisi)= encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,P,method)
locmap_list.append((locMap_1,locMap_2))
elif method == "GRDEI":
(audio_sample,locMap_1,locMap_2,reduceMap_1,reduceMap_2,Partisi) = encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,P,method)
locmap_list.append((locMap_1,locMap_2))
reducemap_list.append((reduceMap_1,reduceMap_2))
#audio_sample = numpy.asarray(audio_sample2,dtype="uint16")
#plt.subplot(n_layer+1,1,i+2)
#plt.plot(time_axis,numpy.array(audio_sample,dtype= "int16"))
#plt.ylabel("Hasil Encode layer ke "+str(i+1))
#plt.xlabel("Waktu (s)")
#plt.savefig("original-multiencode-"+str(n_layer)+".png")
if(payload_counter>=payload_size):
print("stegano berhasil dilakukan")
print(total_capacity)
_M_int = numpy.asarray(audio_sample,dtype=numpy.int16)
new_wav = Wave.Wave(method+"_encoded_"+str(file_name))
new_wav.samples = _M_int
new_wav.bitrate = medium.bitrate
Helper.WavIO.write(path,new_wav)
if method == "GDE" :
return(locmap_list,payload_sizes,P)
elif method == "GRDEI":
return(locmap_list,reducemap_list,payload_sizes,P)
else:
print("kapasitas tidak mencukupi")
#else:
# print "kapasitas tidak mencukupi"
# return -1
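# Illustrative sketch (hypothetical helper, not called above): multilayer_encode
# slices the payload greedily, layer by layer, giving each layer at most its
# measured capacity. The same bookkeeping in isolation:
def slice_payload_by_capacity(bits, layer_capacities):
    slices = []
    start = 0
    for cap in layer_capacities:
        end = min(start + cap, len(bits))
        slices.append(bits[start:end])
        start = end
        if start >= len(bits):
            break
    return slices
# e.g. slice_payload_by_capacity([1]*10, [4, 4, 4]) -> [[1,1,1,1], [1,1,1,1], [1,1]]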
def multilayer_decode(file_path,segment_size,payload_sizes,Partition,method,n_layer,locmap_list,reducemap_list = None,):
full_messages=[]
file_name = file_path.split("\\")[len(file_path.split("\\"))-1]
path = file_path.replace(file_name,'')
print(" PROSES DECODING ")
print(" METODE YANG DIGUNAKAN : " )
print(method)
medium_encoded = Helper.WavIO.open(file_path)
print "bitrate: "
print(medium_encoded.bitrate)
audio_samples = medium_encoded.samples
for i in range (n_layer):
print("layer ke " + str(i))
(locmap_i_1,locmap_i_2) = locmap_list.pop()
if(method == "GRDEI"):
(reducemap_i_1,reducemap_i_2) = reducemap_list.pop()
samples = op.operation.numToBinary(audio_samples)
(_M1,_M2,P) = op.operation.intel_partition(samples,0,Partition)
_intM1 = op.operation.binaryTonum(_M1)
_intM2 = op.operation.binaryTonum(_M2)
if method == "GDE" :
(decoded_M1,message1) = GDE.decode(_intM1,segment_size,locmap_i_1)
(decoded_M2,message2) = GDE.decode(_intM2,segment_size,locmap_i_2)
elif method == "GRDEI":
(decoded_M1,message1) = GRDEI.decode(_intM1,segment_size,locmap_i_1,reducemap_i_1)
(decoded_M2,message2) = GRDEI.decode(_intM2,segment_size,locmap_i_2,reducemap_i_2)
message_decoded = []
message_decoded.extend(message1)
message_decoded.extend(message2)
payload_i_size = payload_sizes.pop()
full_messages.insert(0,message_decoded[0:payload_i_size])
decoded_1 = op.operation.numToBinary(decoded_M1)
decoded_2 = op.operation.numToBinary(decoded_M2)
for i in range(len(decoded_1)):
decoded_1[i]=decoded_1[i][8:16]
for i in range(len(decoded_2)):
decoded_2[i]=decoded_2[i][8:16]
decoded_1_bin = numpy.array(decoded_1,dtype=int)
decoded_2_bin = numpy.array(decoded_2,dtype=int)
M_awal = op.operation.reconstructPartition(decoded_1_bin,decoded_2_bin,Partition)
audio_samples = numpy.asarray(op.operation.binaryTonum(M_awal),dtype=numpy.uint16)
for i in audio_samples:
if(i<0):
print(i)
message_write = []
for i in range(len(full_messages)):
message_write.extend(full_messages[i])
message_write = op.operation.revStringToBinary(message_write)
Helper.payloadIO.write(path+"payload_decoded.txt",message_write)
new_wav = Wave.Wave(file_name.replace("encoded","decoded"))
M__awal = numpy.asarray(audio_samples,dtype=numpy.int16)
new_wav.samples = M__awal
new_wav.bitrate = medium_encoded.bitrate
Helper.WavIO.write(path,new_wav)
if __name__ == "__main__":
#(map1,map2,rmap1,rmap2,p) = encode("D:\\payload.txt","D:\\coba16.wav",100,20,10,"GRDEI")
(locmap_list,reducemap_list,payload_sizes,partisi) = multilayer_encode("D:\\payload.txt","D:\\coba16.wav",50,20,10,"GRDEI",20)
#print(len(locmap_list))
# print(len(reducemap_list))
multilayer_decode("D:\\GRDEI_encoded_coba16.wav",20,payload_sizes,partisi,"GRDEI",20,locmap_list,reducemap_list)
#locmap_list.pop()
#(map1_1,map1_2) = locmap_list.pop()
#reducemap_list.pop()
#(rmap1_1,rmap1_2) = reducemap_list.pop()
#decode("D:\\GRDEI_encoded2_coba16.wav",20,1187,"GRDEI",partisi,map1_1,map1_2,rmap1_1,rmap1_2)
#print(len(locmap_list),len(reducemap_list))
# # print "data payload : "
# #print(payload)
# #arr = [13954, 4369, 37385, 3995, 2556, 46896, 13816, 17865, 40433, 42503, 27740, 14980, 22323, 27920, 48381, 40456, 58866, 60412, 36991, 30730, 14601, 31475, 50583, 57144, 18332, 46140, 47181, 62996, 19071, 30753, 55953, 62831, 8814, 44566, 2191, 16703, 36414, 55831, 28696, 43850]
# #samples = op.operation.numToBinary(arr)
# besar_segmen = 2
# threshold = 200
# ########### DECODING ###############
| mit |
drandykass/fatiando | gallery/gravmag/eqlayer_transform.py | 6 | 3046 | """
Equivalent layer for gridding and upward-continuing gravity data
-------------------------------------------------------------------------
The equivalent layer is one of the best methods for gridding and upward
continuing gravity data and much more. The trade-off is that performing this
requires an inversion and later forward modeling, which are more time consuming
and more difficult to tune than the standard gridding and FFT-based approaches.
This example uses the equivalent layer in :mod:`fatiando.gravmag.eqlayer` to
grid and upward continue some gravity data. There are more advanced methods in
the module than the one we are showing here. They can be more efficient but
usually require more configuration.
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
from fatiando.gravmag import prism, sphere
from fatiando.gravmag.eqlayer import EQLGravity
from fatiando.inversion import Damping
from fatiando import gridder, utils, mesher
# First thing to do is make some synthetic data to test the method. We'll use a
# single prism to keep it simple
props = {'density': 500}
model = [mesher.Prism(-5000, 5000, -200, 200, 100, 4000, props)]
# The synthetic data will be generated on a random scatter of points
area = [-8000, 8000, -5000, 5000]
x, y, z = gridder.scatter(area, 300, z=0, seed=42)
# Generate some noisy data from our model
gz = utils.contaminate(prism.gz(x, y, z, model), 0.2, seed=0)
# Now for the equivalent layer. We must setup a layer of point masses where
# we'll estimate a density distribution that fits our synthetic data
layer = mesher.PointGrid(area, 500, (20, 20))
# Estimate the density using enough damping so that won't try to fit the error
eql = EQLGravity(x, y, z, gz, layer) + 1e-22*Damping(layer.size)
eql.fit()
# Now we add the estimated densities to our layer
layer.addprop('density', eql.estimate_)
# and print some statistics of how well the estimated layer fits the data
residuals = eql[0].residuals()
print("Residuals:")
print(" mean:", residuals.mean(), 'mGal')
print(" stddev:", residuals.std(), 'mGal')
# Now I can forward model gravity data anywhere we want. For interpolation, we
# calculate it on a grid. For upward continuation, at a greater height. We can
# even combine both into a single operation.
x2, y2, z2 = gridder.regular(area, (50, 50), z=-1000)
gz_up = sphere.gz(x2, y2, z2, layer)
fig, axes = plt.subplots(1, 2, figsize=(8, 6))
ax = axes[0]
ax.set_title('Original data')
ax.set_aspect('equal')
tmp = ax.tricontourf(y/1000, x/1000, gz, 30, cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0.1, aspect=30,
orientation='horizontal').set_label('mGal')
ax.plot(y/1000, x/1000, 'xk')
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
ax = axes[1]
ax.set_title('Gridded and upward continued')
ax.set_aspect('equal')
tmp = ax.tricontourf(y2/1000, x2/1000, gz_up, 30, cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0.1, aspect=30,
orientation='horizontal').set_label('mGal')
ax.set_xlabel('y (km)')
plt.tight_layout()
plt.show()
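# Illustrative sketch: the 1e-22 damping weight used above is a tuning knob. A
# larger weight gives a smoother (more strongly regularized) layer at the cost
# of a worse data fit; scanning a few weights and inspecting the residual spread
# is a simple way to choose it. Only objects already defined above are re-used,
# and the weight values are illustrative guesses.
def scan_damping(weights=(1e-24, 1e-22, 1e-20)):
    for weight in weights:
        solver = EQLGravity(x, y, z, gz, layer) + weight*Damping(layer.size)
        solver.fit()
        print("damping %.0e -> residual std %.3f mGal"
              % (weight, solver[0].residuals().std()))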
| bsd-3-clause |
g2p/systems | lib/systems/context.py | 1 | 17949 | # vim: set fileencoding=utf-8 sw=2 ts=2 et :
from __future__ import absolute_import
from __future__ import with_statement
from logging import getLogger
import networkx as NX
import yaml
from systems.collector import Aggregate, CResource
from systems.registry import get_registry
from systems.typesystem import EResource, Transition, ResourceRef
__all__ = ('Realizer', )
LOGGER = getLogger(__name__)
DESC_LIMIT = 64
def describe(thing):
return '%s' % str(thing)[:DESC_LIMIT]
class CycleError(Exception):
pass
class Node(object):
def __init__(self):
if type(self) == Node:
raise TypeError
def __repr__(self):
return '<%s>' % self
def __str__(self):
return type(self).__name__
class CheckPointNode(Node):
pass
class ExpandableNode(Node):
def __init__(self, res):
super(ExpandableNode, self).__init__()
if type(self) == ExpandableNode:
# Abstract class
raise TypeError
self._res = res
class BeforeExpandableNode(ExpandableNode):
def __str__(self):
return 'Before %s' % self._res
class AfterExpandableNode(ExpandableNode):
def __str__(self):
return 'After %s' % self._res
class GraphFirstNode(Node, yaml.YAMLObject):
yaml_tag = u'GraphFirstNode'
class GraphLastNode(Node, yaml.YAMLObject):
yaml_tag = u'GraphLastNode'
node_types = (CheckPointNode, BeforeExpandableNode, AfterExpandableNode,
GraphFirstNode, GraphLastNode,
Transition, Aggregate, CResource, EResource, ResourceRef)
class ResourceGraph(yaml.YAMLObject):
"""
A graph of resources and transitions linked by dependencies.
Resources are positioned as two sentinels in the transition graph.
Invariant: directed, acyclic.
"""
def __init__(self, top=None):
self._graph = NX.DiGraph()
self._first = GraphFirstNode()
self._last = GraphLastNode()
self._graph.add_edge(self._first, self._last)
# Contains CResource and EResource, despite the name.
# Used to enforce max one resource per id.
self.__expandables = {}
# Received references, by name.
self.__received_refs = {}
# What nodes were processed (meaning expanding or collecting)
self.__processed = set()
# Pre-bound args passed by ref. Allow putting extra depends on them.
if top is not None:
if not isinstance(top, ResourceGraph):
raise TypeError(top, ResourceGraph)
self.__top = top
else:
self.__top = self
yaml_tag = u'!ResourceGraph'
@classmethod
def from_yaml(cls, loader, ynode):
rg = cls()
# Deep because of aliases and anchors, I think.
mp = loader.construct_mapping(ynode, deep=True)
pred_rels = mp['nodes']
for rel in pred_rels:
rg._add_node(rel['node'], depends=rel['depends'])
return rg
@classmethod
def to_yaml(cls, dumper, rg):
# This is incomplete.
pred_rels = [{'node': node, 'depends': list(depends), }
for (node, depends) in rg._iter_pred_rels()]
return dumper.represent_mapping(cls.yaml_tag, {
'nodes': pred_rels,
})
def _iter_node_preds(self, node0):
return (node
for node in self._graph.predecessors_iter(node0)
if node not in (self._first, self._last))
def _iter_pred_rels(self):
return ((node, self._iter_node_preds(node))
for node in self.sorted_nodes()
if node not in (self._first, self._last))
def sorted_nodes(self):
return NX.topological_sort(self._graph)
def sorted_transitions(self):
return [n for n in self.sorted_nodes()
if isinstance(n, Transition)]
def iter_uncollected_resources(self):
for nod in self._graph.nodes_iter():
if isinstance(nod, CResource):
if not nod in self.__processed:
yield nod
def iter_unexpanded_resources(self):
for nod in self._graph.nodes_iter():
if isinstance(nod, EResource):
if not nod in self.__processed:
yield nod
def iter_unexpanded_aggregates(self):
for agg in self._graph.nodes_iter():
if isinstance(agg, Aggregate):
if not agg in self.__processed:
yield agg
def iter_unprocessed(self):
for nod in self.iter_uncollected_resources():
yield nod
for nod in self.iter_unexpanded_resources():
yield nod
for nod in self.iter_unexpanded_aggregates():
yield nod
def has_unprocessed(self):
l = list(self.iter_unprocessed())
return bool(l) # Tests for non-emptiness
def require_acyclic(self):
if not NX.is_directed_acyclic_graph(self._graph):
# XXX NX doesn't have a 1-line method for listing those cycles
raise CycleError
def _add_node(self, node, depends=()):
if not isinstance(node, node_types):
raise TypeError(node, node_types)
self._graph.add_node(node)
self._graph.add_edge(self._first, node)
self._graph.add_edge(node, self._last)
for dep in depends:
depn = self._intern(dep)
self._add_node_dep(depn, node)
return node
def add_checkpoint(self, depends=()):
return self._add_node(CheckPointNode(), depends)
def add_transition(self, transition, depends=()):
if not isinstance(transition, Transition):
raise TypeError(transition, Transition)
return self._add_node(transition, depends)
def _add_aggregate(self, aggregate, depends=()):
if not isinstance(aggregate, Aggregate):
raise TypeError(aggregate, Aggregate)
return self._add_node(aggregate, depends)
def add_resource(self, resource, depends=()):
"""
Add a resource.
If an identical resource exists, it is returned.
"""
if not isinstance(resource, (CResource, EResource)):
raise TypeError(resource, (CResource, EResource))
if resource.identity in self.__expandables:
# We have this id already.
# Either it's the exact same resource, or a KeyError is thrown.
resource = self._intern(resource)
# XXX Need to bypass _intern for already expanded.
# XXX When we use add_to_top, we sometimes have to deal
# with a resource that's already been expanded.
# Those are not in the graph anymore. How do we refer to them?
else:
self.__expandables[resource.identity] = resource
# Even if already there, we need to add the depends.
resource = self._add_node(resource, depends)
# If already there, notice we alias it.
return self.make_ref(resource)
def make_ref(self, res, depends=()):
res = self._intern(res)
if not isinstance(res, (CResource, EResource)):
raise TypeError(res, (CResource, EResource))
depends = list(depends)
depends.append(res)
return self._add_node(ResourceRef(res), depends)
def make_alias_ref(self, ref, depends=()):
ref = self._intern(ref)
if not isinstance(ref, ResourceRef):
raise TypeError(ref, ResourceRef)
depends = list(depends)
depends.append(ref)
return self._add_node(ResourceRef(ref.unref), depends)
def add_to_top(self, res):
"""
Add a resource to the top ResourceGraph.
Use it to put things that you don't necessarily
want to be after the outside dependencies the current graph has.
"""
ref = self.__top.add_resource(res)
return self._add_node(ref)
def _add_node_dep(self, node0, node1):
if not isinstance(node0, node_types):
raise TypeError(node0, node_types)
if not isinstance(node1, node_types):
raise TypeError(node1, node_types)
if not self._graph.has_node(node0):
raise KeyError(node0)
if not self._graph.has_node(node1):
raise KeyError(node1)
if self._graph.has_edge(node0, node1):
return False
if node0 == node1:
# Disallow self-loops to keep acyclic invariant.
# Also they don't make sense.
raise ValueError(node0)
# Invariant check
rev_path = NX.shortest_path(self._graph, node1, node0)
if rev_path is not False:
raise CycleError(rev_path)
self._graph.add_edge(node0, node1)
return True
def _intern(self, thing):
if not isinstance(thing, node_types):
raise TypeError
if thing not in self._graph:
raise KeyError(thing)
return thing
def add_dependency(self, elem0, elem1):
node0 = self._intern(elem0)
node1 = self._intern(elem1)
return self._add_node_dep(node0, node1)
def _is_direct_rconnect(self, r0, r1):
s0 = self._intern(r0)
s1 = self._intern(r1)
# shortest_path is also a test for connectedness.
return bool(NX.shortest_path(self._graph, s0, s1))
def resources_connected(self, r0, r1):
return self._is_direct_rconnect(r0, r1) \
or self._is_direct_rconnect(r1, r0)
def draw(self, fname):
return self.draw_agraph(fname)
def draw_agraph(self, fname):
# XXX pygraphviz has steep dependencies (x11 libs)
# and recommends (texlive) for a headless box.
# We duplicate the graph, otherwise networkx / pygraphviz
# would make a lossy conversion (sometimes refusing to convert), by adding
# nodes as their string representation. Madness, I know.
gr2 = NX.create_empty_copy(self._graph, False)
for node in self._graph.nodes_iter():
gr2.add_node(id(node))
for (n0, n1) in self._graph.edges_iter():
gr2.add_edge(id(n0), id(n1))
names = dict((id(node), { 'label': describe(node)})
for node in self._graph.nodes_iter())
gr2.delete_node(id(self._first))
gr2.delete_node(id(self._last))
g = NX.to_agraph(gr2, {
'graph': {
'nodesep': '0.2',
'rankdir': 'TB',
'ranksep': '0.5',
},
'node': {
'shape': 'box',
},
},
names)
g.write(fname + '.dot')
# Dot is good for DAGs.
g.layout(prog='dot')
g.draw(fname + '.svg')
with open(fname + '.yaml', 'w') as f:
yaml.dump(self, f)
# Fails with the expanded graph, due to instancemethod
#yaml.load(yaml.dump(self))
def draw_matplotlib(self, fname):
# Pyplot is stateful and awkward to use.
import matplotlib.pyplot as P
# Disable hold or it definitely won't work (probably a bug).
P.hold(False)
NX.draw(self._graph)
P.savefig(fname)
def collect_resources(self, r0s, r1):
"""
Replace an iterable of resources with one new resource.
May break the acyclic invariant, caveat emptor.
"""
# The invariant is kept iff the r0s don't have paths linking them.
# For our use case (collectors), we could allow paths provided they are
# internal to r0s. This introduces self-loops that we would then remove.
for r0 in r0s:
r0 = self._intern(r0)
if r0 in self.__processed:
raise RuntimeError
if r1 in self._graph:
raise ValueError(r1)
r1 = self._add_aggregate(r1)
for r0 in r0s:
r0 = self._intern(r0)
self._move_edges(r0, r1)
self.__processed.add(r0)
self.require_acyclic()
def _move_edges(self, n0, n1):
if n0 == n1:
raise RuntimeError
n0 = self._intern(n0)
n1 = self._intern(n1)
# list is used as a temporary
# add after delete in case of same.
for pred in list(self._graph.predecessors_iter(n0)):
self._graph.delete_edge(pred, n0)
self._graph.add_edge(pred, n1)
for succ in list(self._graph.successors_iter(n0)):
self._graph.delete_edge(n0, succ)
self._graph.add_edge(n1, succ)
self._graph.delete_node(n0)
# Can't undo. Invariant will stay broken.
def _split_node(self, res):
res = self._intern(res)
before = self._add_node(BeforeExpandableNode(res))
after = self._add_node(AfterExpandableNode(res))
self._graph.add_edge(before, after)
for pred in list(self._graph.predecessors_iter(res)):
self._graph.delete_edge(pred, res)
self._graph.add_edge(pred, before)
for succ in list(self._graph.successors_iter(res)):
self._graph.delete_edge(res, succ)
self._graph.add_edge(after, succ)
self._graph.delete_node(res)
return before, after
def _receive_by_ref(self, name, ref):
if name in self.__received_refs:
raise RuntimeError(name, ref)
ref = self._add_node(ref)
self.__received_refs[name] = ref
return ref
def _pass_by_ref(self, subgraph, name, ref):
# The origin/value distinction is important
# for aliased arguments (two refs, same val).
ref = self._intern(ref)
if not isinstance(ref, ResourceRef):
raise TypeError(ref, ResourceRef)
subgraph._receive_by_ref(name, ref)
def expand_resource(self, res):
"""
Replace res by a small resource graph.
The resource_graph is inserted in the main graph
between the sentinels that represent the resource.
"""
res = self._intern(res)
# We're processing from the outside in.
if res in self.__processed:
raise RuntimeError
resource_graph = ResourceGraph(self.__top)
if isinstance(res, EResource):
for (name, ref) in res.iter_passed_by_ref():
# ref will be present in both graphs.
self._pass_by_ref(resource_graph, name, ref)
elif isinstance(res, Aggregate):
pass
else:
raise TypeError(res)
res.expand_into(resource_graph)
# We expand from the outside in
if bool(resource_graph.__processed):
raise RuntimeError
# Do not skip sentinels.
for n in resource_graph._graph.nodes_iter():
self._add_node(n)
for (n0, n1) in resource_graph._graph.edges_iter():
self._add_node_dep(n0, n1)
for (id1, res1) in resource_graph.__expandables.iteritems():
# We expand from the outside in.
assert res1 not in self.__processed
if id1 in self.__expandables:
# Pass by reference if you must use the same resource
# in different contexts.
raise RuntimeError('ResourceBase collision.', res, res1)
else:
self.__expandables[id1] = res1
before, after = self._split_node(res)
self.__processed.add(res)
self._move_edges(resource_graph._first, before)
self._move_edges(resource_graph._last, after)
# What may break the invariant:
# Passing a ref to res, and making res depend on ref.
# ref ends up on both sides of ref.before.
self.require_acyclic()
class Realizer(object):
"""
A graph of realizables linked by dependencies.
"""
def __init__(self, expandable):
self.__resources = ResourceGraph()
self.__expandable = expandable
self.__state = 'init'
def require_state(self, state):
"""
Raise an exception if we are not in the required state.
"""
if self.__state != state:
raise RuntimeError(u'Realizer state should be «%s»' % state)
def ensure_frozen(self):
"""
Build the finished dependency graph.
Merge identical realizables, collect what can be.
"""
if self.__state == 'frozen':
return
# Order is important
self.require_state('init')
self.__expandable.expand_into(self.__resources)
#self.__resources.draw('/tmp/freezing')
self._expand()
#self.__resources.draw('/tmp/pre-collect')
self._collect()
self._expand_aggregates()
assert not bool(list(self.__resources.iter_unprocessed()))
self.__state = 'frozen'
#self.__resources.draw('/tmp/frozen')
def _collect(self):
# Collects compatible nodes into merged nodes.
def can_merge(part0, part1):
for n0 in part0:
for n1 in part1:
if self.__resources.resources_connected(n0, n1):
return False
return True
def possibly_merge(partition):
# Merge once if possible. Return true if did merge.
e = dict(enumerate(partition))
n = len(partition)
# Loop over the triangle of unordered pairs
for i in xrange(n):
for j in xrange(i + 1, n):
part0, part1 = e[i], e[j]
if can_merge(part0, part1):
partition.add(part0.union(part1))
partition.remove(part0)
partition.remove(part1)
return True
return False
reg = get_registry()
for collector in reg.collectors:
# Pre-partition is made of parts acceptable for the collector.
pre_partition = collector.partition(
[r for r in self.__resources.iter_uncollected_resources()
if collector.filter(r)])
for part in pre_partition:
# Collector parts are split again, the sub-parts are merged
# when dependencies allow.
# Not a particularly efficient algorithm, just simple.
# Gives one solution among many possibilities.
partition = set(frozenset((r, ))
for r in part
for part in pre_partition)
while possibly_merge(partition):
pass
# Let the collector handle the rest
for part in partition:
if not bool(part):
# Test for emptiness.
# Aggregate even singletons.
continue
merged = collector.collect(part)
self.__resources.collect_resources(part, merged)
assert not bool(list(self.__resources.iter_uncollected_resources()))
def _expand(self):
# Poor man's recursion
while True:
fresh = set(r
for r in self.__resources.iter_unexpanded_resources())
if bool(fresh) == False: # Test for emptiness
break
for r in fresh:
self.__resources.expand_resource(r)
assert not bool(list(self.__resources.iter_unexpanded_resources()))
def _expand_aggregates(self):
for a in list(self.__resources.iter_unexpanded_aggregates()):
self.__resources.expand_resource(a)
assert not bool(list(self.__resources.iter_unexpanded_aggregates()))
# Enforce the rule that aggregates can only expand into transitions.
if self.__resources.has_unprocessed():
raise RuntimeError(list(self.__resources.iter_unprocessed()))
def realize(self):
"""
Realize all realizables and transitions in dependency order.
"""
self.ensure_frozen()
for t in self.__resources.sorted_transitions():
t.realize()
self.__state = 'realized'
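# Illustrative sketch (standalone, not used by the classes above): the acyclic
# invariant kept by ResourceGraph._add_node_dep amounts to "only add the edge
# u -> v if no path v -> u already exists". The same check on a bare digraph,
# relying on the old networkx behaviour used above where shortest_path returns
# False when there is no path:
def _would_stay_acyclic(graph, u, v):
    if u == v:
        return False
    return NX.shortest_path(graph, v, u) is False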
| gpl-2.0 |
hsiaoyi0504/scikit-learn | sklearn/manifold/t_sne.py | 106 | 20057 | # Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
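Examples
--------
A toy sketch (uniform joint probabilities and a random embedding, purely
illustrative -- in practice P comes from _joint_probabilities):
>>> import numpy as np
>>> n_samples, n_components = 4, 2
>>> P = np.full(n_samples * (n_samples - 1) // 2, 1.0 / 6)
>>> params = np.random.RandomState(0).randn(n_samples * n_components)
>>> kl, grad = _kl_divergence(params, P, 1.0, n_samples, n_components)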
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
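Examples
--------
A minimal sketch on a toy quadratic objective (illustrative only; the small
learning rate suits this toy problem, not the t-SNE defaults):
>>> import numpy as np
>>> def objective(p):
...     return np.sum(p ** 2), 2.0 * p
>>> p0 = np.array([3.0, 4.0])
>>> p, error, it = _gradient_descent(objective, p0, it=0, n_iter=100,
...                                   learning_rate=0.1)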
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)  # clip in place so min_gain takes effect
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
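Examples
--------
A minimal sketch (random data; the toy "embedding" simply keeps the first
two features, so only the documented [0, 1] range is asserted):
>>> import numpy as np
>>> X = np.random.RandomState(0).rand(20, 5)
>>> X_embedded = X[:, :2]
>>> t = trustworthiness(X, X_embedded, n_neighbors=5)
>>> 0.0 <= t <= 1.0
True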
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
canast02/csci544_fall2016_project | yelp-sentiment/experiments/sentiment_stochasticGradientDescent.py | 1 | 2641 | import numpy as np
from nltk import TweetTokenizer, accuracy
from nltk.stem.snowball import EnglishStemmer
from sklearn import svm, linear_model
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from sentiment_util import load_datasets
def main():
# x, y = load_dataset("datasets/sentiment_uci/yelp_labelled.txt")
x, y = load_datasets(["../datasets/sentiment_uci/yelp_labelled.txt"])
stopwords = set()
with open('../stopwords.txt', 'r') as f:
for w in f:
stopwords.add(w)
tok = TweetTokenizer()
stemmer = EnglishStemmer()
vectorizer = TfidfVectorizer(sublinear_tf=True, use_idf=True, binary=True, preprocessor=stemmer.stem,
tokenizer=tok.tokenize, ngram_range=(1, 2))
accu_p = np.zeros(shape=(2,))
accu_r = np.zeros(shape=(2,))
accu_f = np.zeros(shape=(2,))
accu_a = 0.0
folds = 10
for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True):
train_x, train_y = x[train_idx], y[train_idx]
test_x, test_y = x[test_idx], y[test_idx]
cls = linear_model.SGDClassifier(loss='hinge', penalty='l2', n_iter=100)
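# Hinge loss with an L2 penalty makes this effectively a linear SVM
# trained by stochastic gradient descent.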
# train
train_x = vectorizer.fit_transform(train_x).toarray()
cls.fit(train_x, train_y)
# test
test_x = vectorizer.transform(test_x).toarray()
pred_y = cls.predict(test_x)
# evaluate
p, r, f, _ = precision_recall_fscore_support(test_y, pred_y)
a = accuracy_score(test_y, pred_y)
accu_p += p
accu_r += r
accu_f += f
accu_a += a
print("Evaluating classifier:")
print("\tAccuracy: {}".format(a))
print("\tPrecision[0]: {}".format(p[0]))
print("\tPrecision[1]: {}".format(p[1]))
print("\tRecall[0]: {}".format(r[0]))
print("\tRecall[1]: {}".format(r[1]))
print("\tF1-score[0]: {}".format(f[0]))
print("\tF1-score[1]: {}".format(f[1]))
print("Average evaluation")
print("\tAccuracy: {}".format(accu_a / folds))
print("\tPrecision[0]: {}".format(accu_p[0] / folds))
print("\tPrecision[1]: {}".format(accu_p[1] / folds))
print("\tRecall[0]: {}".format(accu_r[0] / folds))
print("\tRecall[1]: {}".format(accu_r[1] / folds))
print("\tF1-score[0]: {}".format(accu_f[0] / folds))
print("\tF1-score[1]: {}".format(accu_f[1] / folds))
if __name__ == '__main__':
main()
| gpl-3.0 |
Lyleo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports either Numeric or numarray based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: either Numeric or numarray
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has it's own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
| gpl-3.0 |
sliwhu/UWHousingTeam | model/house_price_model.py | 1 | 6622 | """
Contains the house price model.
DON'T USE THIS MODEL! Use the HousePriceModel in house_price_model_2.py.
"""
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import RidgeCV
# Constants
BASE_DATE = pd.to_datetime('20140101', format='%Y%m%d', errors='ignore')
TO_TYPE = 'category'
# Note: It is expected that the following environment variables will be set so
# that the house price model will be able to locate its training data:
#
# SALES_DATA_PATH: The path of the sales data training file, e.g.: "~/directory"
# SALES_DATA_FILE: The name of the sales data training file, e.g.: "File.csv"
#
# os.environ["SALES_DATA_PATH"] = '~/UW Data Science/DATA 515A/Project'
# os.environ["SALES_DATA_FILE"] = 'Merged_Data_excel.csv' # 'KingCountyHomeSalesData.csv'
# Construct the sales data path, and read the sales data.
SALES_DATA_PATH = os.path.join(os.environ['SALES_DATA_PATH'], os.environ['SALES_DATA_FILE'])
SALES_DATA = pd.read_csv(SALES_DATA_PATH, parse_dates=['date'])
# Data cleansing plan:
#
# id: Discard
# date: Convert to integer; make categorical
# price: No conversion
# bedrooms: No conversion
# bathrooms: No conversion
# sqft_living: No conversion
# sqft_lot: No conversion
# floors: Make categorical
# waterfront: Make categorical
# view: Make categorical
# condition: Make categorical
# grade: Make categorical
# sqft_above: No conversion
# sqft_basement: No conversion
# yr_built: Make categorical
# yr_renovated: Copy over yr_built if missing; make categorical
# zipcode: Make categorical
# lat: No conversion
# long: No conversion
# sqft_living15 No conversion
# sqft_lot15 No conversion
# list_price No conversion
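# Illustrative sketch of the plan above (``df`` is a hypothetical DataFrame;
# kept as comments so nothing executes at import time):
#
#     df['zipcode'] = df['zipcode'].astype('category')
#     df['yr_renovated'] = np.where(df['yr_renovated'] == 0,
#                                   df['yr_built'], df['yr_renovated'])
#
# The helpers below (create_model_feature, create_model_data_frame) apply
# this pattern column by column.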
def construct_models():
"""
Constructs a ridge regression model, and a random forest model for housing
price data.
:return: A ridge regression model, and a random forest model for housing
price data
"""
return train_models(create_model_data_frame(SALES_DATA))
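# A hedged usage sketch (assumes SALES_DATA_PATH/SALES_DATA_FILE point at a
# valid sales CSV; ``new_houses`` is a hypothetical frame with the same
# predictor columns used for training):
#
#     ridge_model, forest_model = construct_models()
#     predicted_prices = forest_model.predict(new_houses)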
def create_model_data_frame(source):
"""
Creates a data frame suitable for constructing a model.
:param source: The source data frame
:return: A data frame suitable for constructing a model
"""
# Create an empty data frame. Get the date series from the source.
my_model_data = pd.DataFrame()
sales_date = source['date']
# Extract the sales date as an integer.
my_model_data['sale_day'] =\
(sales_date - get_base_date()).astype('timedelta64[D]').astype(int) + 1
# Extract the sale day-of-week as an integer, and the sale day in month.
my_model_data['sale_day_of_week'] = sales_date.dt.dayofweek.astype(TO_TYPE)
my_model_data['sale_day_in_month'] = sales_date.dt.day.astype(TO_TYPE)
# Extract common features as numeric, or categorical values.
# create_model_feature(my_model_data, source, 'price', False)
create_model_feature(my_model_data, source, 'price', False)
create_model_feature(my_model_data, source, 'bedrooms', False)
create_model_feature(my_model_data, source, 'bathrooms', False)
create_model_feature(my_model_data, source, 'sqft_living', False)
create_model_feature(my_model_data, source, 'sqft_lot', False)
create_model_feature(my_model_data, source, 'floors', True)
create_model_feature(my_model_data, source, 'waterfront', True)
create_model_feature(my_model_data, source, 'view', True)
create_model_feature(my_model_data, source, 'condition', True)
create_model_feature(my_model_data, source, 'grade', True)
create_model_feature(my_model_data, source, 'sqft_above', False)
create_model_feature(my_model_data, source, 'sqft_basement', False)
create_model_feature(my_model_data, source, 'yr_built', True)
# Use 'year built' in place of 'year renovated' if year renovated is zero
# in the source.
field_name = 'yr_renovated'
my_model_data[field_name] = pd.Categorical(np.where(
source[field_name] == 0,
source['yr_built'].astype(TO_TYPE),
source[field_name].astype(TO_TYPE)))
# Extract more common features as numeric, or categorical values.
create_model_feature(my_model_data, source, 'zipcode', True)
create_model_feature(my_model_data, source, 'lat', False)
create_model_feature(my_model_data, source, 'long', False)
create_model_feature(my_model_data, source, 'sqft_living15', False)
create_model_feature(my_model_data, source, 'sqft_lot15', False)
my_model_data['list_price'] = source['List price']
# Return the completed model data frame.
return my_model_data
def create_model_feature(destination, source, name, to_categorical=False):
"""
Creates a feature in a destination data frame.
:param destination: The destination data frame
:param source: The source data frame
:param name: The name of the feature to copy
:param to_categorical: True if the feature should be converted to
categorical, false otherwise
:return: None
"""
if to_categorical:
destination[name] = source[name].astype(TO_TYPE)
else:
destination[name] = source[name]
return None
def get_base_date():
"""
Gets the base date as a reference for day of sale.
:return: The base date as a reference for day of sale
"""
return BASE_DATE
def train_models(my_model_data):
"""
Trains a ridge regression model, and a random forest model, and returns
them.
:param my_model_data: The model data on which to train
:return: A ridge regression model, and a random forest model
"""
# Construct the ridge regression model.
my_ridge_model = RidgeCV(alphas=(0.1, 1.0, 10.0),
fit_intercept=True,
normalize=True,
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=True)
# Construct the random forest model.
my_forest_model = RandomForestRegressor()
# Divide the model data into predictor and response.
response_field = 'price'
predictors = my_model_data.loc[:, my_model_data.columns != response_field]
response = my_model_data[response_field]
# Fit the models, and return them.
my_ridge_model.fit(X=predictors, y=response)
my_forest_model.fit(X=predictors, y=response)
return my_ridge_model, my_forest_model
| mit |
renatopp/liac | liac/dataset/__init__.py | 1 | 3050 | # =============================================================================
# Federal University of Rio Grande do Sul (UFRGS)
# Connectionist Artificial Intelligence Laboratory (LIAC)
# Renato de Pontes Pereira - rppereira@inf.ufrgs.br
# =============================================================================
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
'''
This module is an interface to pandas and provides some utility functions for
handling dataset.
'''
import os
import pandas as pd
from . import arff
__all__ = ['load', 'read_csv', 'read_clipboard', 'read_arff']
read_csv = pd.read_csv
read_clipboard = pd.read_clipboard
def read_arff(set_name):
'''
Read ARFF file into pandas DataFrame.
:param set_name: the dataset path.
'''
f = open(set_name)
info = arff.load(f)
f.close()
attributes = [a[0] for a in info['attributes']]
data = info['data']
return pd.DataFrame(data, columns=attributes)
def load(set_name, *args, **kwargs):
'''
This function loads automatically any dataset in the following formats:
arff; csv; excel; hdf; sql; json; html; stata; clipboard; pickle. Moreover,
it loads the default datasets such as "iris" if the extension in `set_name` is
unknown.
:param set_name: the dataset path or the default dataset name.
:returns: a `pd.DataFrame` object.
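Example (a minimal sketch; assumes the package is importable as
``liac.dataset``, "iris" is a bundled default set and "data.csv" is a
hypothetical file path):
>>> from liac import dataset
>>> iris = dataset.load('iris')
>>> prices = dataset.load('data.csv')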
'''
_, ext = os.path.splitext(set_name)
if ext == '.arff':
loader = read_arff
elif ext in ['.csv', '.txt']:
loader = read_csv
else:
loader = __load_default_set
dataset = loader(set_name, *args, **kwargs)
return dataset
def __load_default_set(set_name):
ALIASES = {'linaker':'linaker1v'}
name = ''.join([ALIASES.get(set_name, set_name), '.arff'])
file_name = os.path.join(os.path.dirname(__file__), 'sets', name)
return read_arff(file_name)
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/signal/spectral.py | 4 | 66089 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm running time scales roughly as O(x * freqs) or O(N^2)
for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.00099728892368242854
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011,ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
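    # Added clarification: every output sample position modulo `step`
    # receives contributions from the same set of window offsets, so summing
    # the window in chunks of length `step` gives the total overlap-add
    # weight at each position; COLA holds when these weights are constant.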
binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
    function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
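    # Added note: each inverse-FFT segment is weighted by the window and
    # overlap-added into `x`, while `norm` accumulates the squared window.
    # Dividing by `norm` afterwards gives the least-squares (Griffin-Lim
    # style) estimate described in the Notes section of the docstring.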
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
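# Illustrative sketch (added; not part of the original module): with a
# COLA-satisfying window/overlap pair and the default boundary/padding,
# `istft` inverts `stft` up to floating-point error::
#
#     >>> x = np.random.randn(1024)
#     >>> f, t, Zxx = stft(x, nperseg=128, noverlap=64)
#     >>> t_rec, x_rec = istft(Zxx, nperseg=128, noverlap=64)
#     >>> np.allclose(x, x_rec[:x.size])
#     True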
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence estimate, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode: str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
    if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
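    # Added note: in 'stft' mode the result holds linear (amplitude-like)
    # values rather than a product of two transforms, so the power-scaling
    # factor computed above is square-rooted.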
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
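# Added orientation note: the public wrappers differ mainly in how they
# post-process this helper's per-segment output -- for example, `stft`
# returns it directly (mode='stft'), while `csd`/`welch` call it with
# mode='psd' and then average over the segment axis; see those functions
# for the exact handling.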
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# http://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
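        # Added example: for x.shape[-1] == 8, nperseg == 4, noverlap == 2
        # this yields shape (..., 3, 4) -- three overlapping length-4 views
        # into the same buffer, with no data copied.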
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
func = np.fft.rfft
result = func(result, n=nfft)
return result
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
    ----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
        The window as an array. If the function was called with a string or
        tuple, this holds the actual array generated for that window.
nperseg : int
        Length of each segment. If window is str or tuple, nperseg is set to
        256. If window is array_like, nperseg is set to the length of the
        window.
"""
#parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
            warnings.warn('nperseg = {0:d} is greater than input length '
                          '= {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
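# Illustrative sketch (added; not part of the original module)::
#
#     >>> win, nperseg = _triage_segments('hann', None, input_length=1024)
#     >>> win.shape, nperseg
#     ((256,), 256)
#     >>> win, nperseg = _triage_segments(np.ones(64), None, input_length=1024)
#     >>> win.shape, nperseg
#     ((64,), 64)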
| gpl-3.0 |
se4u/pylearn2 | pylearn2/sandbox/cuda_convnet/bench.py | 44 | 3589 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
output = FilterActs()(images, filters)
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
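    # Added note: FilterActs expects the cuda-convnet c01b layout
    # (channels, rows, cols, batch), whereas theano's conv2d uses bc01
    # (batch, channels, rows, cols) -- hence the transposes below. The
    # filters are also flipped along their spatial axes because conv2d
    # performs a true convolution, while FilterActs computes a correlation.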
images_bc01v = base_image_value.transpose(3,0,1,2)
filters_bc01v = base_filters_value.transpose(3,0,1,2)
filters_bc01v = filters_bc01v[:,:,::-1,::-1]
images_bc01 = shared(images_bc01v)
filters_bc01 = shared(filters_bc01v)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid', image_shape = images_bc01v.shape,
filter_shape = filters_bc01v.shape)
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 64,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
"""
| bsd-3-clause |
0asa/sparklingpandas | sparklingpandas/test/pandas_groupby_tests.py | 2 | 9480 | """
Test our groupby support based on the pandas groupby tests.
"""
#
# This file is licensed under the Pandas 3 clause BSD license.
#
from tempfile import NamedTemporaryFile
from sparklingpandas.test.sparklingpandastestcase import \
SparklingPandasTestCase
import sys
import pandas as pd
from pandas import date_range, bdate_range, Timestamp
from pandas.core.index import Index, MultiIndex, Int64Index
from pandas.core.common import rands
from pandas.core.api import Categorical, DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_index_equal, assertRaisesRegexp)
from pandas.compat import(
range, long, lrange, StringIO, lmap, lzip, map,
zip, builtins, OrderedDict
)
from pandas import compat
import pandas.util.testing as tm
import unittest2
import numpy as np
class PandasGroupby(SparklingPandasTestCase):
def setUp(self):
"""
Setup the dataframes used for the groupby tests derived from pandas
"""
self.dateRange = bdate_range('1/1/2005', periods=250)
self.stringIndex = Index([rands(8).upper() for x in range(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v)
for k, v in compat.iteritems(self.groupId))
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(np.random.randn(8),
dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
super(self.__class__, self).setUp()
def test_first_last_nth(self):
# tests for first / last / nth
ddf = self.psc.from_data_frame(self.df)
assert_frame_equal(ddf.collect(), self.df)
grouped = self.psc.from_data_frame(self.df).groupby('A')
first = grouped.first().collect()
expected = self.df.ix[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0).collect()
assert_frame_equal(nth, expected)
last = grouped.last().collect()
expected = self.df.ix[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1).collect()
assert_frame_equal(nth, expected)
nth = grouped.nth(1).collect()
expected = self.df.ix[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
@unittest2.expectedFailure
def test_getitem(self):
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
self.assertTrue(com.isnull(grouped['B'].first()['foo']))
self.assertTrue(com.isnull(grouped['B'].last()['foo']))
# not sure what this is testing
self.assertTrue(com.isnull(grouped['B'].nth(0)[0]))
@unittest2.expectedFailure
def test_new_in0140(self):
"""
Test new functionality in 0.14.0. This currently doesn't work.
"""
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
ddf = self.psc.from_data_frame(df)
g = ddf.groupby('A')
result = g.first().collect()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any').collect()
assert_frame_equal(result, expected)
@unittest2.expectedFailure
def test_first_last_nth_dtypes(self):
"""
We do groupby fine on mixed types, but our copy from local dataframe
ends up re-running the guess type function, so the dtypes don't match.
Issue #25
"""
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = ddf.groupby('A')
first = grouped.first().collect()
expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last().collect()
expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1).collect()
expected = df.ix[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
def test_var_on_multiplegroups(self):
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'data3': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
ddf = self.psc.from_data_frame(df)
dgrouped = ddf.groupby(['key1', 'key2'])
grouped = df.groupby(['key1', 'key2'])
assert_frame_equal(dgrouped.var().collect(), grouped.var())
def test_agg_api(self):
# Note: needs a very recent version of pandas to pass
# TODO(holden): Pass this test if local fails
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
ddf = self.psc.from_data_frame(df)
dgrouped = ddf.groupby('key1')
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = dgrouped.agg(peak_to_peak).collect()
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
dgrouped = self.psc.from_data_frame(
self.tsframe).groupby(
[lambda x: x.year, lambda x: x.month])
result = dgrouped.agg(np.mean).collect()
expected = grouped.agg(np.mean)
assert_frame_equal(result, expected)
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tests/test_coding_standards.py | 7 | 12216 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from fnmatch import fnmatch
import os
from nose.tools import assert_equal
from nose.plugins.skip import SkipTest
from matplotlib.testing.noseclasses import KnownFailureTest
try:
import pep8
except ImportError:
HAS_PEP8 = False
else:
HAS_PEP8 = pep8.__version__ > '1.4.5'
import matplotlib
PEP8_ADDITIONAL_IGNORE = ['E111',
'E114',
'E115',
'E116',
'E121',
'E122',
'E123',
'E124',
'E125',
'E126',
'E127',
'E128',
'E129',
'E131',
'E265',
'E266',
'W503']
EXTRA_EXCLUDE_FILE = os.path.join(os.path.dirname(__file__),
'.pep8_test_exclude.txt')
if HAS_PEP8:
class StandardReportWithExclusions(pep8.StandardReport):
#: A class attribute to store the exception exclusion file patterns.
expected_bad_files = []
#: A class attribute to store the lines of failing tests.
_global_deferred_print = []
#: A class attribute to store patterns which have seen exceptions.
matched_exclusions = set()
def get_file_results(self):
# If the file had no errors, return self.file_errors
# (which will be 0).
if not self._deferred_print:
return self.file_errors
# Iterate over all of the patterns, to find a possible exclusion.
# If the filename is to be excluded, go ahead and remove the
# counts that self.error added.
for pattern in self.expected_bad_files:
if fnmatch(self.filename, pattern):
self.matched_exclusions.add(pattern)
# invert the error method's counters.
for _, _, code, _, _ in self._deferred_print:
self.counters[code] -= 1
if self.counters[code] == 0:
self.counters.pop(code)
self.messages.pop(code)
self.file_errors -= 1
self.total_errors -= 1
return self.file_errors
# mirror the content of StandardReport, only storing the output to
# file rather than printing. This could be a feature request for
# the PEP8 tool.
self._deferred_print.sort()
for line_number, offset, code, text, _ in self._deferred_print:
self._global_deferred_print.append(
self._fmt % {'path': self.filename,
'row': self.line_offset + line_number,
'col': offset + 1, 'code': code,
'text': text})
return self.file_errors
def assert_pep8_conformance(module=matplotlib, exclude_files=None,
extra_exclude_file=EXTRA_EXCLUDE_FILE,
pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE,
dirname=None, expected_bad_files=None,
extra_exclude_directories=None):
"""
Tests the matplotlib codebase against the "pep8" tool.
Users can add their own excluded files (should files exist in the
local directory which is not in the repository) by adding a
".pep8_test_exclude.txt" file in the same directory as this test.
The file should be a line separated list of filenames/directories
as can be passed to the "pep8" tool's exclude list.
"""
if not HAS_PEP8:
raise SkipTest('The pep8 tool is required for this test')
# to get a list of bad files, rather than the specific errors, add
# "reporter=pep8.FileReport" to the StyleGuide constructor.
pep8style = pep8.StyleGuide(quiet=False,
reporter=StandardReportWithExclusions)
reporter = pep8style.options.reporter
if expected_bad_files is not None:
reporter.expected_bad_files = expected_bad_files
# Extend the number of PEP8 guidelines which are not checked.
pep8style.options.ignore = (pep8style.options.ignore +
tuple(pep8_additional_ignore))
# Support for egg shared object wrappers, which are not PEP8 compliant,
# nor part of the matplotlib repository.
# DO NOT ADD FILES *IN* THE REPOSITORY TO THIS LIST.
if exclude_files is not None:
pep8style.options.exclude.extend(exclude_files)
# Allow users to add their own exclude list.
if extra_exclude_file is not None and os.path.exists(extra_exclude_file):
with open(extra_exclude_file, 'r') as fh:
extra_exclude = [line.strip() for line in fh if line.strip()]
pep8style.options.exclude.extend(extra_exclude)
if extra_exclude_directories:
pep8style.options.exclude.extend(extra_exclude_directories)
if dirname is None:
dirname = os.path.dirname(module.__file__)
result = pep8style.check_files([dirname])
if reporter is StandardReportWithExclusions:
msg = ("Found code syntax errors (and warnings):\n"
"{0}".format('\n'.join(reporter._global_deferred_print)))
else:
msg = "Found code syntax errors (and warnings)."
assert_equal(result.total_errors, 0, msg)
# If we've been using the exclusions reporter, check that we didn't
# exclude files unnecessarily.
if reporter is StandardReportWithExclusions:
unexpectedly_good = sorted(set(reporter.expected_bad_files) -
reporter.matched_exclusions)
if unexpectedly_good:
raise ValueError('Some exclude patterns were unnecessary as the '
'files they pointed to either passed the PEP8 '
'tests or do not point to a file:\n '
'{0}'.format('\n '.join(unexpectedly_good)))
def test_pep8_conformance_installed_files():
exclude_files = ['_delaunay.py',
'_image.py',
'_tri.py',
'_backend_agg.py',
'_tkagg.py',
'ft2font.py',
'_cntr.py',
'_contour.py',
'_png.py',
'_path.py',
'ttconv.py',
'_gtkagg.py',
'_backend_gdk.py',
'pyparsing*',
'_qhull.py',
'_macosx.py']
expected_bad_files = ['_cm.py',
'_mathtext_data.py',
'backend_bases.py',
'cbook.py',
'collections.py',
'dviread.py',
'font_manager.py',
'fontconfig_pattern.py',
'gridspec.py',
'legend_handler.py',
'mathtext.py',
'patheffects.py',
'pylab.py',
'pyplot.py',
'rcsetup.py',
'stackplot.py',
'texmanager.py',
'transforms.py',
'type1font.py',
'widgets.py',
'testing/decorators.py',
'testing/jpl_units/Duration.py',
'testing/jpl_units/Epoch.py',
'testing/jpl_units/EpochConverter.py',
'testing/jpl_units/StrConverter.py',
'testing/jpl_units/UnitDbl.py',
'testing/jpl_units/UnitDblConverter.py',
'testing/jpl_units/UnitDblFormatter.py',
'testing/jpl_units/__init__.py',
'tri/triinterpolate.py',
'tests/test_axes.py',
'tests/test_bbox_tight.py',
'tests/test_delaunay.py',
'tests/test_dviread.py',
'tests/test_image.py',
'tests/test_legend.py',
'tests/test_lines.py',
'tests/test_mathtext.py',
'tests/test_rcparams.py',
'tests/test_simplification.py',
'tests/test_streamplot.py',
'tests/test_subplots.py',
'tests/test_tightlayout.py',
'tests/test_triangulation.py',
'compat/subprocess.py',
'backends/__init__.py',
'backends/backend_agg.py',
'backends/backend_cairo.py',
'backends/backend_cocoaagg.py',
'backends/backend_gdk.py',
'backends/backend_gtk.py',
'backends/backend_gtk3.py',
'backends/backend_gtk3cairo.py',
'backends/backend_gtkagg.py',
'backends/backend_gtkcairo.py',
'backends/backend_macosx.py',
'backends/backend_mixed.py',
'backends/backend_pgf.py',
'backends/backend_ps.py',
'backends/backend_svg.py',
'backends/backend_template.py',
'backends/backend_tkagg.py',
'backends/tkagg.py',
'backends/windowing.py',
'backends/qt_editor/formlayout.py',
'sphinxext/mathmpl.py',
'sphinxext/only_directives.py',
'sphinxext/plot_directive.py',
'projections/__init__.py',
'projections/geo.py',
'projections/polar.py',
'externals/six.py']
expected_bad_files = ['*/matplotlib/' + s for s in expected_bad_files]
assert_pep8_conformance(module=matplotlib,
exclude_files=exclude_files,
expected_bad_files=expected_bad_files)
def test_pep8_conformance_examples():
mpldir = os.environ.get('MPL_REPO_DIR', None)
if mpldir is None:
# try and guess!
fp = os.getcwd()
while len(fp) > 2:
if os.path.isdir(os.path.join(fp, 'examples')):
mpldir = fp
break
fp, tail = os.path.split(fp)
if mpldir is None:
raise KnownFailureTest("can not find the examples, set env "
"MPL_REPO_DIR to point to the top-level path "
"of the source tree")
exdir = os.path.join(mpldir, 'examples')
blacklist = ()
expected_bad_files = ['*/pylab_examples/table_demo.py',
'*/pylab_examples/tricontour_demo.py',
'*/pylab_examples/tripcolor_demo.py',
'*/pylab_examples/triplot_demo.py',
'*/shapes_and_collections/artist_reference.py']
assert_pep8_conformance(dirname=exdir,
extra_exclude_directories=blacklist,
pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE +
['E116', 'E501', 'E402'],
expected_bad_files=expected_bad_files)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
tedmeeds/tcga_encoder | tcga_encoder/utils/helpers.py | 1 | 3776 | import tensorflow
import tcga_encoder
import sys, os, yaml
import numpy as np
import scipy as sp
import pylab as pp
import pandas as pd
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold
from collections import *
import itertools
import pdb
def xval_folds( n, K, randomize = False, seed = None ):
if randomize is True:
print("XVAL RANDOMLY PERMUTING")
if seed is not None:
print( "XVAL SETTING SEED = %d"%(seed) )
np.random.seed(seed)
x = np.random.permutation(n)
else:
print( "XVAL JUST IN ARANGE ORDER")
x = np.arange(n,dtype=int)
kf = KFold( K )
train = []
test = []
for train_ids, test_ids in kf.split( x ):
#train_ids = np.setdiff1d( x, test_ids )
train.append( x[train_ids] )
test.append( x[test_ids] )
#pdb.set_trace()
return train, test
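# Added usage sketch: split 10 samples into 5 deterministic folds, e.g.
#
#   train_folds, test_folds = xval_folds(10, 5)
#   # len(train_folds) == len(test_folds) == 5; each test fold holds 2
#   # indices and the matching train fold holds the remaining 8.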
def chunks(l, n):
#Yield successive n-sized chunks from l.
for i in xrange(0, len(l), n):
yield l[i:i + n]
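# Added sketch (Python 2 semantics, matching this module's use of xrange):
#
#   list(chunks(range(8), 3))  # -> [[0, 1, 2], [3, 4, 5], [6, 7]]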
def load_gmt( filename ):
with open(filename, 'r') as f:
pathway2genes = OrderedDict()
gene2pathways = OrderedDict()
for line in f.readlines():
splits = line.split("\t")
splits[-1] = splits[-1].rstrip("\n")
#pdb.set_trace()
if splits[0][:9] == "HALLMARK_":
pathway = splits[0][9:]
link = splits[1]
genes = splits[2:]
pathway2genes[ pathway ] = genes
for g in genes:
if gene2pathways.has_key( g ):
gene2pathways[g].append( pathway )
else:
gene2pathways[g] = [pathway]
return pathway2genes, gene2pathways
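# Added note: load_gmt expects the MSigDB hallmark GMT format, i.e. one
# tab-separated line per gene set:
#
#   HALLMARK_APOPTOSIS<TAB>http://...<TAB>GENE1<TAB>GENE2<TAB>...
#
# Only lines whose first field starts with "HALLMARK_" are kept, and that
# prefix is stripped from the stored pathway name.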
def load_yaml( filename ):
with open(filename, 'r') as f:
data = yaml.load(f)
return data
def check_and_mkdir( path_name, verbose = False ):
ok = False
if os.path.exists( path_name ) == True:
ok = True
else:
if verbose:
print "Making directory: ", path_name
os.makedirs( path_name )
ok = True
return ok
def ReadH5( fullpath ):
df = pd.read_hdf( fullpath )
return df
def OpenHdfStore(location, which_one, mode ):
store_name = "%s.h5"%(which_one)
check_and_mkdir( location )
full_name = os.path.join( location, store_name )
# I think we can just open in 'a' mode for both
if os.path.exists(full_name) is False:
print "OpenHdfStore: %s does NOT EXIST, opening in %s mode"%(full_name, mode)
return pd.HDFStore( full_name, mode )
else:
print "OpenHdfStore: %s does EXISTS, opening in %s mode"%(full_name, mode)
return pd.HDFStore( full_name, mode )
def CloseHdfStore(store):
return store.close()
# a generator for batch ids
class batch_ids_maker:
def __init__(self, batchsize, n, randomize = True):
self.batchsize = min( batchsize, n )
#assert n >= batchsize, "Right now must have batchsize < n"
self.randomize = randomize
#self.batchsize = batchsize
self.n = n
self.indices = self.new_indices()
self.start_idx = 0
def __iter__(self):
return self
def new_indices(self):
if self.randomize:
return np.random.permutation(self.n).astype(int)
else:
return np.arange(self.n,dtype=int)
def next(self, weights = None ):
if weights is not None:
return self.weighted_next( weights )
if self.start_idx+self.batchsize >= len(self.indices):
keep_ids = self.indices[self.start_idx:]
self.indices = np.hstack( (keep_ids, self.new_indices() ))
self.start_idx = 0
ids = self.indices[self.start_idx:self.start_idx+self.batchsize]
self.start_idx += self.batchsize
return ids
def weighted_next( self, weights ):
I = np.argsort( -weights )
ids = self.indices[ I[:self.batchsize] ]
return ids | mit |
weidel-p/nest-simulator | pynest/nest/tests/test_spatial/test_plotting.py | 12 | 5748 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic spatial plotting functions.
"""
import unittest
import nest
import numpy as np
try:
import matplotlib.pyplot as plt
tmp_fig = plt.figure() # make sure we can open a window; DISPLAY may not be set
plt.close(tmp_fig)
PLOTTING_POSSIBLE = True
except:
PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE,
'Plotting impossible because matplotlib or display missing')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[3, 3],
extent=[2., 2.],
edge_wrap=True))
nest.PlotLayer(l)
plotted_datapoints = plt.gca().collections[-1].get_offsets().data
reference_datapoints = nest.GetPosition(l)
self.assertTrue(np.allclose(plotted_datapoints, reference_datapoints))
def test_PlotTargets(self):
"""Test plotting targets."""
delta = 0.05
mask = {'rectangular': {'lower_left': [-delta, -2/3 - delta], 'upper_right': [2/3 + delta, delta]}}
cdict = {'rule': 'pairwise_bernoulli', 'p': 1.,
'mask': mask}
sdict = {'synapse_model': 'stdp_synapse'}
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[3, 3],
extent=[2., 2.],
edge_wrap=True))
# connect l -> l
nest.Connect(l, l, cdict, sdict)
ctr = nest.FindCenterElement(l)
fig = nest.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
plotted_datapoints = plt.gca().collections[0].get_offsets().data
eps = 0.01
pos = np.array(nest.GetPosition(l))
pos_xmask = pos[np.where(pos[:, 0] > -eps)]
reference_datapoints = pos_xmask[np.where(pos_xmask[:, 1] < eps)][::-1]
self.assertTrue(np.array_equal(np.sort(plotted_datapoints, axis=0), np.sort(reference_datapoints, axis=0)))
fig = nest.PlotTargets(ctr, l, mask=mask)
ax = fig.gca()
ax.set_title('Call with mask')
self.assertGreaterEqual(len(ax.patches), 1)
def test_plot_probability_kernel(self):
"""Plot parameter probability"""
nest.ResetKernel()
plot_shape = [10, 10]
plot_edges = [-0.5, 0.5, -0.5, 0.5]
def probability_calculation(distance):
return 1 - 1.5*distance
l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
source = l[25]
source_pos = np.array(nest.GetPosition(source))
source_x, source_y = source_pos
# Calculate reference values
ref_probability = np.zeros(plot_shape[::-1])
for i, x in enumerate(np.linspace(plot_edges[0], plot_edges[1], plot_shape[0])):
positions = np.array([[x, y] for y in np.linspace(plot_edges[2], plot_edges[3], plot_shape[1])])
ref_distances = np.sqrt((positions[:, 0] - source_x)**2 + (positions[:, 1] - source_y)**2)
values = probability_calculation(ref_distances)
ref_probability[:, i] = np.maximum(np.minimum(np.array(values), 1.0), 0.0)
# Create the parameter
parameter = probability_calculation(nest.spatial.distance)
fig, ax = plt.subplots()
nest.PlotProbabilityParameter(source, parameter, ax=ax, shape=plot_shape, edges=plot_edges)
self.assertEqual(len(ax.images), 1)
img = ax.images[0]
img_data = img.get_array().data
self.assertTrue(np.array_equal(img_data, ref_probability))
def test_plot_probability_kernel_with_mask(self):
"""Plot parameter probability with mask"""
nest.ResetKernel()
plot_shape = [10, 10]
plot_edges = [-0.5, 0.5, -0.5, 0.5]
l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
parameter = 1 - 1.5*nest.spatial.distance
source = l[25]
masks = [{'circular': {'radius': 0.4}},
{'doughnut': {'inner_radius': 0.2, 'outer_radius': 0.45}},
{'rectangular': {'lower_left': [-.3, -.3], 'upper_right': [0.3, 0.3]}},
{'elliptical': {'major_axis': 0.8, 'minor_axis': 0.4}}]
fig, axs = plt.subplots(2, 2)
for mask, ax in zip(masks, axs.flatten()):
nest.PlotProbabilityParameter(source, parameter, mask=mask, ax=ax, shape=plot_shape, edges=plot_edges)
self.assertEqual(len(ax.images), 1)
self.assertGreaterEqual(len(ax.patches), 1)
def suite():
suite = unittest.makeSuite(PlottingTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
plt.show()
| gpl-2.0 |
mattilyra/scikit-learn | sklearn/__init__.py | 27 | 3086 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.dev0'
try:
# This variable is injected in the __builtins__ by the build
    # process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
samgoodgame/sf_crime | iterations/KK_scripts/transform_test_data.py | 2 | 6405 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 11:25:06 2017
@author: kalvi
"""
#required imports
import pandas as pd
import numpy as np
import csv
import time
import calendar
def get_test_data(test_transformed_path, test_path, earlyWeatherDataPath, weatherData1, weatherData2):
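    """Load the transformed test features, re-attach timestamps parsed from the
    raw test CSV, merge 3-hour trailing-window weather aggregates built from the
    weather CSVs, and return the combined feature DataFrame.
    """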
x_data = pd.read_csv(test_transformed_path, header=0)
########## Adding the date back into the data
dataCSV = open(test_path, 'rt')
csvData = list(csv.reader(dataCSV))
    csvFields = csvData[0]  # header row of the raw test CSV
allData = csvData[1:]
dataCSV.close()
df = pd.DataFrame(allData)
df.columns = csvFields
dates = df['Dates']
dates = dates.apply(time.strptime, args=("%Y-%m-%d %H:%M:%S",))
dates = dates.apply(calendar.timegm)
x_data['secondsFromEpoch'] = dates
colnames = x_data.columns.tolist()
colnames = colnames[-1:] + colnames[:-1]
x_data = x_data[colnames]
##########
#functions for processing the sunrise and sunset times of each day
def get_hour_and_minute(milTime):
hour = int(milTime[:-2])
minute = int(milTime[-2:])
return [hour, minute]
def get_date_only(date):
return time.struct_time(tuple([date[0], date[1], date[2], 0, 0, 0, date[6], date[7], date[8]]))
def structure_sun_time(timeSeries, dateSeries):
sunTimes = timeSeries.copy()
for index in range(len(dateSeries)):
sunTimes[index] = time.struct_time(tuple([dateSeries[index][0], dateSeries[index][1], dateSeries[index][2], timeSeries[index][0], timeSeries[index][1], dateSeries[index][5], dateSeries[index][6], dateSeries[index][7], dateSeries[index][8]]))
return sunTimes
def get_weather_data(data_path):
dataCSV = open(data_path, 'rt')
csv_data = list(csv.reader(dataCSV))
        csv_fields = csv_data[0]  # header row of the weather CSV
weather_data = csv_data[1:]
dataCSV.close()
weather_df = pd.DataFrame(weather_data)
weather_df.columns = csv_fields
dates = weather_df['DATE']
sunrise = weather_df['DAILYSunrise']
sunset = weather_df['DAILYSunset']
dates = dates.apply(time.strptime, args=("%Y-%m-%d %H:%M",))
sunrise = sunrise.apply(get_hour_and_minute)
sunrise = structure_sun_time(sunrise, dates)
sunrise = sunrise.apply(calendar.timegm)
sunset = sunset.apply(get_hour_and_minute)
sunset = structure_sun_time(sunset, dates)
sunset = sunset.apply(calendar.timegm)
dates = dates.apply(calendar.timegm)
weather_df['DATE'] = dates
weather_df['DAILYSunrise'] = sunrise
weather_df['DAILYSunset'] = sunset
return weather_df
########## Adding the weather data into the original crime data
earlyWeatherDF = get_weather_data(earlyWeatherDataPath)
weatherDF1 = get_weather_data(weatherData1)
weatherDF2 = get_weather_data(weatherData2)
weatherDF = pd.concat([earlyWeatherDF[450:975],weatherDF1,weatherDF2[32:]],ignore_index=True)
# weather feature selection
weatherMetrics = weatherDF[['DATE','HOURLYDRYBULBTEMPF','HOURLYRelativeHumidity', 'HOURLYWindSpeed', \
'HOURLYSeaLevelPressure', 'HOURLYVISIBILITY', 'DAILYSunrise', 'DAILYSunset']]
weatherMetrics = weatherMetrics.convert_objects(convert_numeric=True)
weatherDates = weatherMetrics['DATE']
#'DATE','HOURLYDRYBULBTEMPF','HOURLYRelativeHumidity', 'HOURLYWindSpeed',
#'HOURLYSeaLevelPressure', 'HOURLYVISIBILITY'
timeWindow = 10800 #3 hours
hourlyDryBulbTemp = []
hourlyRelativeHumidity = []
hourlyWindSpeed = []
hourlySeaLevelPressure = []
hourlyVisibility = []
dailySunrise = []
dailySunset = []
daylight = []
test = 0
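    # For every crime timestamp, average the weather observations that fall in
    # the preceding 3-hour window (timeWindow seconds), keep the most recent
    # sunrise/sunset to build a daylight indicator, and collect the results in
    # the lists above. Each iteration rescans the weather frame, so this loop
    # is O(n_rows * n_weather_obs) and can take a while on the full test set.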
    for timePoint in dates:  # dates is the epoch time from the Kaggle data
relevantWeather = weatherMetrics[(weatherDates <= timePoint) & (weatherDates > timePoint - timeWindow)]
hourlyDryBulbTemp.append(relevantWeather['HOURLYDRYBULBTEMPF'].mean())
hourlyRelativeHumidity.append(relevantWeather['HOURLYRelativeHumidity'].mean())
hourlyWindSpeed.append(relevantWeather['HOURLYWindSpeed'].mean())
hourlySeaLevelPressure.append(relevantWeather['HOURLYSeaLevelPressure'].mean())
hourlyVisibility.append(relevantWeather['HOURLYVISIBILITY'].mean())
dailySunrise.append(relevantWeather['DAILYSunrise'].iloc[-1])
dailySunset.append(relevantWeather['DAILYSunset'].iloc[-1])
daylight.append(1.0*((timePoint >= relevantWeather['DAILYSunrise'].iloc[-1]) and (timePoint < relevantWeather['DAILYSunset'].iloc[-1])))
if test%100000 == 0:
print(relevantWeather)
test += 1
hourlyDryBulbTemp = pd.Series.from_array(np.array(hourlyDryBulbTemp))
hourlyRelativeHumidity = pd.Series.from_array(np.array(hourlyRelativeHumidity))
hourlyWindSpeed = pd.Series.from_array(np.array(hourlyWindSpeed))
hourlySeaLevelPressure = pd.Series.from_array(np.array(hourlySeaLevelPressure))
hourlyVisibility = pd.Series.from_array(np.array(hourlyVisibility))
dailySunrise = pd.Series.from_array(np.array(dailySunrise))
dailySunset = pd.Series.from_array(np.array(dailySunset))
daylight = pd.Series.from_array(np.array(daylight))
x_data['HOURLYDRYBULBTEMPF'] = hourlyDryBulbTemp
x_data['HOURLYRelativeHumidity'] = hourlyRelativeHumidity
x_data['HOURLYWindSpeed'] = hourlyWindSpeed
x_data['HOURLYSeaLevelPressure'] = hourlySeaLevelPressure
x_data['HOURLYVISIBILITY'] = hourlyVisibility
#x_data['DAILYSunrise'] = dailySunrise
#x_data['DAILYSunset'] = dailySunset
x_data['Daylight'] = daylight
x_data = x_data.drop('secondsFromEpoch', 1)
x_data = x_data.drop('pd_bayview_binary', 1)
return x_data
test_transformed_path = "./data/test_transformed.csv"
test_path = "./data/test.csv"
earlyWeatherDataPath = "./data/1049158.csv"
weatherData1 = "./data/1027175.csv"
weatherData2 = "./data/1027176.csv"
write_path = "C:/MIDS/W207 final project/test_data_with_weather.csv"
x_data = get_test_data(test_transformed_path, test_path, earlyWeatherDataPath, weatherData1, weatherData2)
x_data.to_csv(path_or_buf=write_path,index=0) | mit |
NelisVerhoef/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max] x [y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh according to the cluster it is assigned to.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
justinbois/fish-activity | tests/test_parse.py | 1 | 8083 | import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import fishact
def test_sniffer():
n_header, delimiter, line = fishact.parse._sniff_file_info(
'tests/single_gtype.txt')
assert n_header == 2
assert delimiter is None
assert line == '1\n'
n_header, delimiter, line = fishact.parse._sniff_file_info(
'tests/multiple_gtype.txt')
assert n_header == 2
assert delimiter == '\t'
assert line == '1\t5\t2\n'
def test_gtype_loader():
df = fishact.parse.load_gtype('tests/single_gtype.txt', quiet=True,
rstrip=True)
assert all(df.columns == ['genotype', 'location'])
assert all(df['genotype'] == 'all fish')
assert all(df['location'] == [1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, 93, 94, 96])
def test_resample_array():
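    # Expected values below imply that _resample_array sums the signal over
    # consecutive windows of the given width and scales a trailing partial
    # window up to a full-window equivalent (e.g. the final window [9] with
    # width 3 gives 9 * 3 = 27); NaNs propagate into their window. This is
    # inferred from the test expectations, not from _resample_array's docs.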
x = np.arange(10, dtype=float)
assert np.isclose(fishact.parse._resample_array(x, 10),
np.array([45.])).all()
assert np.isclose(fishact.parse._resample_array(x, 20),
np.array([90.])).all()
assert np.isclose(fishact.parse._resample_array(x, 5),
np.array([10., 35.])).all()
assert np.isclose(fishact.parse._resample_array(x, 3),
np.array([3., 12., 21., 27.])).all()
x[-3] = np.nan
assert np.isclose(fishact.parse._resample_array(x, 5)[0], 10.0) \
and np.isnan(fishact.parse._resample_array(x, 5)[1])
def test_resample_segment():
df = pd.DataFrame({'a': np.arange(10),
'b': np.arange(10, 20),
'c': np.arange(10, dtype=float),
'd': np.arange(10, 20, dtype=float)})
re_df = fishact.parse._resample_segment(df, 5, ['c'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 5],
'b': [10, 15],
'c': [10., 35.],
'd': [10., 15.]})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse._resample_segment(df, 5, ['c', 'd'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 5],
'b': [10, 15],
'c': [10., 35.],
'd': [60., 85.]})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse._resample_segment(df, 3, ['c'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 3, 6, 9],
'b': [10, 13, 16, 19],
'c': [3., 12., 21., 27.],
'd': [10., 13., 16., 19.]})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse._resample_segment(df, 3, ['c', 'd'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 3, 6, 9],
'b': [10, 13, 16, 19],
'c': [3., 12., 21., 27.],
'd': [33., 42., 51., 57.]})
assert_frame_equal(re_df, correct_df)
def test_resample():
df = pd.DataFrame(
{'location': np.concatenate((np.ones(10), 2*np.ones(10))).astype(int),
'exp_time': np.concatenate((np.arange(10),
np.arange(10))).astype(float),
'exp_ind': np.concatenate((np.arange(10), np.arange(10))).astype(int),
'zeit': np.concatenate((np.arange(10),
np.arange(10))).astype(float),
'zeit_ind': np.concatenate((np.arange(10),
np.arange(10))).astype(int),
'activity': np.concatenate((np.arange(10),
np.arange(10, 20))).astype(float),
'sleep': np.ones(20, dtype=float),
'light': [True]*5 + [False]*5 + [True]*5 + [False]*5,
'day': [5]*10 + [6]*10,
'genotype': ['wt']*20,
'acquisition': np.ones(20, dtype=int),
'instrument': np.ones(20, dtype=int),
'trial': np.ones(20, dtype=int),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:01:00',
'2017-03-30 14:02:00',
'2017-03-30 14:03:00',
'2017-03-30 14:04:00',
'2017-03-30 14:05:00',
'2017-03-30 14:06:00',
'2017-03-30 14:07:00',
'2017-03-30 14:08:00',
'2017-03-30 14:09:00']*2)})
re_df = fishact.parse.resample(df, 5, signal=['activity', 'sleep'],
quiet=True)
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame(
{'activity': np.array([10., 35., 60., 85.]),
'day': [5, 5, 6, 6],
'location': np.array([1, 1, 2, 2], dtype=int),
'genotype': ['wt']*4,
'light': [True, False, True, False],
'sleep': np.array([5., 5., 5., 5.]),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:05:00']*2),
'exp_time': np.array([0., 5., 0., 5.]),
'exp_ind': np.array([0, 5, 0, 5], dtype=int),
'zeit': np.array([0., 5., 0., 5.]),
'zeit_ind': np.array([0, 5, 0, 5], dtype=int),
'acquisition': np.ones(4, dtype=int),
'instrument': np.ones(4, dtype=int),
'trial': np.ones(4, dtype=int)})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse.resample(df, 3, quiet=True)
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame(
{'activity': np.array([3., 10.5, 18., 25.5, 33., 40.5, 48., 55.5]),
'day': [5, 5, 5, 5, 6, 6, 6, 6],
'location': np.array([1, 1, 1, 1, 2, 2, 2, 2], dtype=int),
'genotype': ['wt']*8,
'light': [True, True, False, False, True, True, False, False],
'sleep': np.array([3., 3., 3., 3., 3., 3., 3., 3.]),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:03:00',
'2017-03-30 14:05:00',
'2017-03-30 14:08:00']*2),
'exp_time': np.array([0., 3., 5., 8., 0., 3., 5., 8.]),
'exp_ind': np.array([0, 3, 5, 8, 0, 3, 5, 8], dtype=int),
'zeit': np.array([0., 3., 5., 8., 0., 3., 5., 8.]),
'zeit_ind': np.array([0, 3, 5, 8, 0, 3, 5, 8], dtype=int),
'acquisition': np.ones(8, dtype=int),
'instrument': np.ones(8, dtype=int),
'trial': np.ones(8, dtype=int)})
assert_frame_equal(re_df, correct_df)
def test_tidy_data():
# Test that it will not overwrite existing file
with pytest.raises(RuntimeError) as excinfo:
fishact.parse.tidy_data('test.csv', 'test_geno.txt', 'test.csv')
excinfo.match("Cowardly refusing to overwrite input file.")
with pytest.raises(RuntimeError) as excinfo:
fishact.parse.tidy_data('test.csv', 'test_geno.txt', 'test_geno.txt')
excinfo.match("Cowardly refusing to overwrite input file.")
with pytest.raises(RuntimeError) as excinfo:
fishact.parse.tidy_data('test.csv', 'test_geno.txt',
'tests/empty_file_for_tests.csv')
excinfo.match("tests/empty_file_for_tests.csv already exists, cowardly refusing to overwrite.")
## TO DO: integration test: make sure output CSV is as expected.
| mit |
CVML/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
Locality Sensitive Hashing Forest behaves as the number of candidates and
the number of estimators (trees) vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
huzq/scikit-learn | examples/cluster/plot_mean_shift.py | 23 | 1775 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
# #############################################################################
# Compute clustering with MeanShift
# The bandwidth can be estimated automatically with estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
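# quantile picks the point of the pairwise-distance distribution used as the
# bandwidth (0.5 would use the median distance); n_samples caps how many points
# are drawn to compute that estimate, keeping the estimation cheap.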
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/neighbors/plot_lof.py | 30 | 1939 | """
=================================================
Anomaly detection with Local Outlier Factor (LOF)
=================================================
This example presents the Local Outlier Factor (LOF) estimator. The LOF
algorithm is an unsupervised outlier detection method which computes the local
density deviation of a given data point with respect to its neighbors.
It considers as outliers the samples that have a substantially lower density
than their neighbors.
The number of neighbors considered (parameter n_neighbors) is typically
chosen 1) greater than the minimum number of objects a cluster has to contain,
so that other objects can be local outliers relative to this cluster, and 2)
smaller than the maximum number of nearby objects that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
np.random.seed(42)
# Generate train data
X = 0.3 * np.random.randn(100, 2)
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X + 2, X - 2, X_outliers]
# fit the model
clf = LocalOutlierFactor(n_neighbors=20)
y_pred = clf.fit_predict(X)
y_pred_outliers = y_pred[200:]
# plot the level sets of the decision function
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Local Outlier Factor (LOF)")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
a = plt.scatter(X[:200, 0], X[:200, 1], c='white')
b = plt.scatter(X[200:, 0], X[200:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a, b],
["normal observations",
"abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/scipy/spatial/tests/test__plotutils.py | 15 | 2140 | from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_, assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import MatplotlibDeprecationWarning
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
        with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
        with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
        with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| mit |
anntzer/seaborn | seaborn/_core.py | 1 | 44884 | import warnings
import itertools
from copy import copy
from functools import partial
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .palettes import (
QUAL_PALETTES,
color_palette,
cubehelix_palette,
_parse_cubehelix_args,
)
from .utils import (
get_color_cycle,
remove_na,
)
class SemanticMapping:
"""Base class for mapping data values to plot attributes."""
# -- Default attributes that all SemanticMapping subclasses must set
# Whether the mapping is numeric, categorical, or datetime
map_type = None
# Ordered list of unique values in the input data
levels = None
# A mapping from the data values to corresponding plot attributes
lookup_table = None
def __init__(self, plotter):
# TODO Putting this here so we can continue to use a lot of the
# logic that's built into the library, but the idea of this class
        # is to move towards semantic mappings that are agnostic about the
# kind of plot they're going to be used to draw.
# Fully achieving that is going to take some thinking.
self.plotter = plotter
def map(cls, plotter, *args, **kwargs):
# This method is assigned the __init__ docstring
method_name = "_{}_map".format(cls.__name__[:-7].lower())
setattr(plotter, method_name, cls(plotter, *args, **kwargs))
return plotter
def _lookup_single(self, key):
"""Apply the mapping to a single data value."""
return self.lookup_table[key]
def __call__(self, key, *args, **kwargs):
"""Get the attribute(s) values for the data key."""
if isinstance(key, (list, np.ndarray, pd.Series)):
return [self._lookup_single(k, *args, **kwargs) for k in key]
else:
return self._lookup_single(key, *args, **kwargs)
@share_init_params_with_map
class HueMapping(SemanticMapping):
"""Mapping that sets artist colors according to data values."""
# A specification of the colors that should appear in the plot
palette = None
# An object that normalizes data values to [0, 1] range for color mapping
norm = None
# A continuous colormap object for interpolating in a numeric context
cmap = None
def __init__(
self, plotter, palette=None, order=None, norm=None,
):
"""Map the levels of the `hue` variable to distinct colors.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["hue"]
if data.notna().any():
map_type = self.infer_map_type(
palette, norm, plotter.input_format, plotter.var_types["hue"]
)
# Our goal is to end up with a dictionary mapping every unique
# value in `data` to a color. We will also keep track of the
# metadata about this mapping we will need for, e.g., a legend
# --- Option 1: numeric mapping with a matplotlib colormap
if map_type == "numeric":
data = pd.to_numeric(data)
levels, lookup_table, norm, cmap = self.numeric_mapping(
data, palette, norm,
)
# --- Option 2: categorical mapping using seaborn palette
elif map_type == "categorical":
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
data, palette, order,
)
# --- Option 3: datetime mapping
else:
# TODO this needs actual implementation
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), palette, order,
)
self.map_type = map_type
self.lookup_table = lookup_table
self.palette = palette
self.levels = levels
self.norm = norm
self.cmap = cmap
def _lookup_single(self, key):
"""Get the color for a single value, using colormap to interpolate."""
try:
# Use a value that's in the original data vector
value = self.lookup_table[key]
except KeyError:
# Use the colormap to interpolate between existing datapoints
# (e.g. in the context of making a continuous legend)
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
value = self.cmap(normed)
return value
def infer_map_type(self, palette, norm, input_format, var_type):
"""Determine how to implement the mapping."""
if palette in QUAL_PALETTES:
map_type = "categorical"
elif norm is not None:
map_type = "numeric"
elif isinstance(palette, (dict, list)):
map_type = "categorical"
elif input_format == "wide":
map_type = "categorical"
else:
map_type = var_type
return map_type
def categorical_mapping(self, data, palette, order):
"""Determine colors when the hue mapping is categorical."""
# -- Identify the order and name of the levels
levels = categorical_order(data, order)
n_colors = len(levels)
# -- Identify the set of colors to use
if isinstance(palette, dict):
missing = set(levels) - set(palette)
if any(missing):
err = "The palette dictionary is missing keys: {}"
raise ValueError(err.format(missing))
lookup_table = palette
else:
if palette is None:
if n_colors <= len(get_color_cycle()):
colors = color_palette(None, n_colors)
else:
colors = color_palette("husl", n_colors)
elif isinstance(palette, list):
if len(palette) != n_colors:
err = "The palette list has the wrong number of colors."
raise ValueError(err)
colors = palette
else:
colors = color_palette(palette, n_colors)
lookup_table = dict(zip(levels, colors))
return levels, lookup_table
def numeric_mapping(self, data, palette, norm):
"""Determine colors when the hue variable is quantitative."""
if isinstance(palette, dict):
# The presence of a norm object overrides a dictionary of hues
# in specifying a numeric mapping, so we need to process it here.
levels = list(sorted(palette))
colors = [palette[k] for k in sorted(palette)]
cmap = mpl.colors.ListedColormap(colors)
lookup_table = palette.copy()
else:
# The levels are the sorted unique values in the data
levels = list(np.sort(remove_na(data.unique())))
# --- Sort out the colormap to use from the palette argument
# Default numeric palette is our default cubehelix palette
# TODO do we want to do something complicated to ensure contrast?
palette = "ch:" if palette is None else palette
if isinstance(palette, mpl.colors.Colormap):
cmap = palette
elif str(palette).startswith("ch:"):
args, kwargs = _parse_cubehelix_args(palette)
cmap = cubehelix_palette(0, *args, as_cmap=True, **kwargs)
else:
try:
cmap = mpl.cm.get_cmap(palette)
except (ValueError, TypeError):
err = "Palette {} not understood"
raise ValueError(err)
# Now sort out the data normalization
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = "``hue_norm`` must be None, tuple, or Normalize object."
raise ValueError(err)
if not norm.scaled():
norm(np.asarray(data.dropna()))
lookup_table = dict(zip(levels, cmap(norm(levels))))
return levels, lookup_table, norm, cmap
@share_init_params_with_map
class SizeMapping(SemanticMapping):
"""Mapping that sets artist sizes according to data values."""
# An object that normalizes data values to [0, 1] range
norm = None
def __init__(
self, plotter, sizes=None, order=None, norm=None,
):
"""Map the levels of the `size` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["size"]
if data.notna().any():
map_type = self.infer_map_type(
norm, sizes, plotter.var_types["size"]
)
# --- Option 1: numeric mapping
if map_type == "numeric":
levels, lookup_table, norm = self.numeric_mapping(
data, sizes, norm,
)
# --- Option 2: categorical mapping
elif map_type == "categorical":
levels, lookup_table = self.categorical_mapping(
data, sizes, order,
)
# --- Option 3: datetime mapping
# TODO this needs an actual implementation
else:
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), sizes, order,
)
self.map_type = map_type
self.levels = levels
self.norm = norm
self.sizes = sizes
self.lookup_table = lookup_table
def infer_map_type(self, norm, sizes, var_type):
if norm is not None:
map_type = "numeric"
elif isinstance(sizes, (dict, list)):
map_type = "categorical"
else:
map_type = var_type
return map_type
def _lookup_single(self, key):
try:
value = self.lookup_table[key]
except KeyError:
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
size_values = self.lookup_table.values()
size_range = min(size_values), max(size_values)
value = size_range[0] + normed * np.ptp(size_range)
return value
def categorical_mapping(self, data, sizes, order):
levels = categorical_order(data, order)
if isinstance(sizes, dict):
# Dict inputs map existing data values to the size attribute
missing = set(levels) - set(sizes)
if any(missing):
err = f"Missing sizes for the following levels: {missing}"
raise ValueError(err)
lookup_table = sizes.copy()
elif isinstance(sizes, list):
# List inputs give size values in the same order as the levels
if len(sizes) != len(levels):
err = "The `sizes` list has the wrong number of values."
raise ValueError(err)
lookup_table = dict(zip(levels, sizes))
else:
if isinstance(sizes, tuple):
# Tuple input sets the min, max size values
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# Otherwise, we need to get the min, max size values from
# the plotter object we are attached to.
# TODO this is going to cause us trouble later, because we
# want to restructure things so that the plotter is generic
# across the visual representation of the data. But at this
# point, we don't know the visual representation. Likely we
# want to change the logic of this Mapping so that it gives
                # points on a normalized range that then gets unnormalized
# when we know what we're drawing. But given the way the
# package works now, this way is cleanest.
sizes = self.plotter._default_size_range
# For categorical sizes, use regularly-spaced linear steps
# between the minimum and maximum sizes
sizes = np.linspace(*sizes, len(levels))
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table
def numeric_mapping(self, data, sizes, norm):
if isinstance(sizes, dict):
# The presence of a norm object overrides a dictionary of sizes
            # in specifying a numeric mapping, so we need to process the
# dictionary here
levels = list(np.sort(list(sizes)))
size_values = sizes.values()
size_range = min(size_values), max(size_values)
else:
# The levels here will be the unique values in the data
levels = list(np.sort(remove_na(data.unique())))
if isinstance(sizes, tuple):
# For numeric inputs, the size can be parametrized by
# the minimum and maximum artist values to map to. The
# norm object that gets set up next specifies how to
# do the mapping.
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
size_range = sizes
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# When not provided, we get the size range from the plotter
# object we are attached to. See the note in the categorical
                # method about how this is suboptimal for future development.
size_range = self.plotter._default_size_range
# Now that we know the minimum and maximum sizes that will get drawn,
# we need to map the data values that we have into that range. We will
# use a matplotlib Normalize class, which is typically used for numeric
# color mapping but works fine here too. It takes data values and maps
# them into a [0, 1] interval, potentially nonlinear-ly.
if norm is None:
# Default is a linear function between the min and max data values
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
# It is also possible to give different limits in data space
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = f"Value for size `norm` parameter not understood: {norm}"
raise ValueError(err)
else:
# If provided with Normalize object, copy it so we can modify
norm = copy(norm)
# Set the mapping so all output values are in [0, 1]
norm.clip = True
# If the input range is not set, use the full range of the data
if not norm.scaled():
norm(levels)
# Map from data values to [0, 1] range
sizes_scaled = norm(levels)
# Now map from the scaled range into the artist units
if isinstance(sizes, dict):
lookup_table = sizes
else:
lo, hi = size_range
sizes = lo + sizes_scaled * (hi - lo)
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table, norm
@share_init_params_with_map
class StyleMapping(SemanticMapping):
"""Mapping that sets artist style according to data values."""
# Style mapping is always treated as categorical
map_type = "categorical"
def __init__(
self, plotter, markers=None, dashes=None, order=None,
):
"""Map the levels of the `style` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["style"]
if data.notna().any():
# Cast to list to handle numpy/pandas datetime quirks
if variable_type(data) == "datetime":
data = list(data)
# Find ordered unique values
levels = categorical_order(data, order)
markers = self._map_attributes(
markers, levels, unique_markers(len(levels)), "markers",
)
dashes = self._map_attributes(
dashes, levels, unique_dashes(len(levels)), "dashes",
)
# Build the paths matplotlib will use to draw the markers
paths = {}
filled_markers = []
for k, m in markers.items():
if not isinstance(m, mpl.markers.MarkerStyle):
m = mpl.markers.MarkerStyle(m)
paths[k] = m.get_path().transformed(m.get_transform())
filled_markers.append(m.is_filled())
# Mixture of filled and unfilled markers will show line art markers
# in the edge color, which defaults to white. This can be handled,
# but there would be additional complexity with specifying the
# weight of the line art markers without overwhelming the filled
# ones with the edges. So for now, we will disallow mixtures.
if any(filled_markers) and not all(filled_markers):
err = "Filled and line art markers cannot be mixed"
raise ValueError(err)
lookup_table = {}
for key in levels:
lookup_table[key] = {}
if markers:
lookup_table[key]["marker"] = markers[key]
lookup_table[key]["path"] = paths[key]
if dashes:
lookup_table[key]["dashes"] = dashes[key]
self.levels = levels
self.lookup_table = lookup_table
def _lookup_single(self, key, attr=None):
"""Get attribute(s) for a given data point."""
if attr is None:
value = self.lookup_table[key]
else:
value = self.lookup_table[key][attr]
return value
def _map_attributes(self, arg, levels, defaults, attr):
"""Handle the specification for a given style attribute."""
if arg is True:
lookup_table = dict(zip(levels, defaults))
elif isinstance(arg, dict):
missing = set(levels) - set(arg)
if missing:
err = f"These `{attr}` levels are missing values: {missing}"
raise ValueError(err)
lookup_table = arg
elif isinstance(arg, Sequence):
if len(levels) != len(arg):
err = f"The `{attr}` argument has the wrong number of values"
raise ValueError(err)
lookup_table = dict(zip(levels, arg))
elif arg:
err = f"This `{attr}` argument was not understood: {arg}"
raise ValueError(err)
else:
lookup_table = {}
return lookup_table
# =========================================================================== #
class VectorPlotter:
"""Base class for objects underlying *plot functions."""
_semantic_mappings = {
"hue": HueMapping,
"size": SizeMapping,
"style": StyleMapping,
}
# TODO units is another example of a non-mapping "semantic"
# we need a general name for this and separate handling
semantics = "x", "y", "hue", "size", "style", "units"
wide_structure = {
"x": "index", "y": "values", "hue": "columns", "style": "columns",
}
flat_structure = {"x": "index", "y": "values"}
_default_size_range = 1, 2 # Unused but needed in tests, ugh
def __init__(self, data=None, variables={}):
self.assign_variables(data, variables)
for var, cls in self._semantic_mappings.items():
if var in self.semantics:
# Create the mapping function
map_func = partial(cls.map, plotter=self)
setattr(self, f"map_{var}", map_func)
# Call the mapping function to initialize with default values
getattr(self, f"map_{var}")()
@classmethod
def get_semantics(cls, kwargs):
"""Subset a dictionary` arguments with known semantic variables."""
return {k: kwargs[k] for k in cls.semantics}
def assign_variables(self, data=None, variables={}):
"""Define plot variables, optionally using lookup from `data`."""
x = variables.get("x", None)
y = variables.get("y", None)
if x is None and y is None:
self.input_format = "wide"
plot_data, variables = self._assign_variables_wideform(
data, **variables,
)
else:
self.input_format = "long"
plot_data, variables = self._assign_variables_longform(
data, **variables,
)
self.plot_data = plot_data
self.variables = variables
self.var_types = {
v: variable_type(
plot_data[v],
boolean_type="numeric" if v in "xy" else "categorical"
)
for v in variables
}
return self
def _assign_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
        Data can be a vector or mapping that is coercible to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# TODO raise here if any kwarg values are not None,
# # if we decide for "structure-only" wide API
# First, determine if the data object actually has any data in it
empty = data is None or not len(data)
# Then, determine if we have "flat" data (a single vector)
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(data)
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame(columns=self.semantics)
variables = {}
elif flat:
# Handle flat data by converting to pandas Series and using the
# index and/or values to define x and/or y
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data).copy()
names = {
"values": flat_data.name,
"index": flat_data.index.name
}
plot_data = {}
variables = {}
for var in ["x", "y"]:
if var in self.flat_structure:
attr = self.flat_structure[var]
plot_data[var] = getattr(flat_data, attr)
variables[var] = names[self.flat_structure[var]]
plot_data = pd.DataFrame(plot_data).reindex(columns=self.semantics)
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
# Otherwise, delegate to the pandas DataFrame constructor
# This is where we'd prefer to use a general interface that says
# "give me this data as a pandas DataFrame", so we can accept
# DataFrame objects from other libraries
wide_data = pd.DataFrame(data, copy=True)
# At this point we should reduce the dataframe to numeric cols
numeric_cols = wide_data.apply(variable_type) == "numeric"
wide_data = wide_data.loc[:, numeric_cols]
# Now melt the data to long form
melt_kws = {"var_name": "columns", "value_name": "values"}
if "index" in self.wide_structure.values():
melt_kws["id_vars"] = "index"
wide_data["index"] = wide_data.index.to_series()
plot_data = wide_data.melt(**melt_kws)
# Assign names corresponding to plot semantics
for var, attr in self.wide_structure.items():
plot_data[var] = plot_data[attr]
plot_data = plot_data.reindex(columns=self.semantics)
# Define the variable names
variables = {}
for var, attr in self.wide_structure.items():
obj = getattr(wide_data, attr)
variables[var] = getattr(obj, "name", None)
return plot_data, variables
def _assign_variables_longform(self, data=None, **kwargs):
"""Define plot variables given long-form data and/or vector inputs.
Parameters
----------
data : dict-like collection of vectors
Input data where variable names map to vector values.
kwargs : variable -> data mappings
Keys are seaborn variables (x, y, hue, ...) and values are vectors
in any format that can construct a :class:`pandas.DataFrame` or
names of columns or index levels in ``data``.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
Raises
------
ValueError
When variables are strings that don't appear in ``data``.
"""
plot_data = {}
variables = {}
# Data is optional; all variables can be defined as vectors
if data is None:
data = {}
# TODO should we try a data.to_dict() or similar here to more
# generally accept objects with that interface?
# Note that dict(df) also works for pandas, and gives us what we
# want, whereas DataFrame.to_dict() gives a nested dict instead of
# a dict of series.
        # Variables can also be extracted from the index attribute
# TODO is this the most general way to enable it?
# There is no index.to_dict on multiindex, unfortunately
try:
index = data.index.to_frame()
except AttributeError:
index = {}
# The caller will determine the order of variables in plot_data
for key, val in kwargs.items():
if isinstance(val, (str, bytes)):
# String inputs trigger __getitem__
if val in data:
# First try to get an entry in the data object
plot_data[key] = data[val]
variables[key] = val
elif val in index:
# Failing that, try to get an entry in the index object
plot_data[key] = index[val]
variables[key] = val
else:
# We don't know what this name means
err = f"Could not interpret input '{val}'"
raise ValueError(err)
else:
# Otherwise, assume the value is itself a vector of data
# TODO check for 1D here or let pd.DataFrame raise?
plot_data[key] = val
# Try to infer the name of the variable
variables[key] = getattr(val, "name", None)
# Construct a tidy plot DataFrame. This will convert a number of
# types automatically, aligning on index in case of pandas objects
plot_data = pd.DataFrame(plot_data, columns=self.semantics)
# Reduce the variables dictionary to fields with valid data
variables = {
var: name
for var, name in variables.items()
if plot_data[var].notnull().any()
}
return plot_data, variables
def _semantic_subsets(
self, grouping_semantics, reverse=False, from_comp_data=False,
):
"""Generator for getting subsets of data defined by semantic variables.
Parameters
----------
grouping_semantics : list of strings
Semantic variables that define the subsets of data.
reverse : bool, optional
If True, reverse the order of iteration.
from_comp_data : bool, optional
If True, use self.comp_data rather than self.plot_data
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
if isinstance(grouping_semantics, str):
grouping_semantics = [grouping_semantics]
# Reduce to the semantics used in this plot
grouping_semantics = [
var for var in grouping_semantics if var in self.variables
]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if grouping_semantics:
grouped_data = data.groupby(
grouping_semantics, sort=False, as_index=False
)
grouping_keys = []
for var in grouping_semantics:
# TODO this is messy, add "semantic levels" property?
map_obj = getattr(self, f"_{var}_map")
grouping_keys.append(map_obj.levels)
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
# Pandas fails with singleton tuple inputs
pd_key = key[0] if len(key) == 1 else key
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
continue
yield dict(zip(grouping_semantics, key)), data_subset
else:
yield {}, data
@property
def comp_data(self):
"""Dataframe with numeric x and y, after unit conversion and log scaling."""
if not hasattr(self, "ax"):
# Probably a good idea, but will need a bunch of tests updated
# Most of these tests should just use the external interface
            # Then this can be re-enabled.
# raise AttributeError("No Axes attached to plotter")
return self.plot_data
if not hasattr(self, "_comp_data"):
comp_data = self.plot_data.copy(deep=False)
for var in "xy":
axis = getattr(self.ax, f"{var}axis")
comp_var = axis.convert_units(self.plot_data[var])
if axis.get_scale() == "log":
comp_var = np.log10(comp_var)
comp_data[var] = comp_var
self._comp_data = comp_data
return self._comp_data
def _attach(self, ax, allowed_types=None, log_scale=None):
"""Associate the plotter with a matplotlib Axes and initialize its units.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Axes object that we will eventually plot onto.
allowed_types : str or list of str
If provided, raise when either the x or y variable does not have
one of the declared seaborn types.
log_scale : bool, number, or pair of bools or numbers
If not False, set the axes to use log scaling, with the given
base or defaulting to 10. If a tuple, interpreted as separate
arguments for the x and y axes.
"""
if allowed_types is None:
# TODO should we define this default somewhere?
allowed_types = ["numeric", "datetime", "categorical"]
elif isinstance(allowed_types, str):
allowed_types = [allowed_types]
for var in set("xy").intersection(self.variables):
# Check types of x/y variables
var_type = self.var_types[var]
if var_type not in allowed_types:
err = (
f"The {var} variable is {var_type}, but one of "
f"{allowed_types} is required"
)
raise TypeError(err)
# Register with the matplotlib unit conversion machinery
# TODO do we want to warn or raise if mixing units?
axis = getattr(ax, f"{var}axis")
seed_data = self.plot_data[var]
if var_type == "categorical":
seed_data = categorical_order(seed_data)
axis.update_units(seed_data)
# Possibly log-scale one or both axes
if log_scale is not None:
# Allow single value or x, y tuple
try:
scalex, scaley = log_scale
except TypeError:
scalex = log_scale if "x" in self.variables else False
scaley = log_scale if "y" in self.variables else False
for axis, scale in zip("xy", (scalex, scaley)):
if scale:
set_scale = getattr(ax, f"set_{axis}scale")
if scale is True:
set_scale("log")
else:
set_scale("log", **{f"base{axis}": scale})
self.ax = ax
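    # A minimal usage sketch (illustration only, not part of the class API;
    # "p" stands for any plotter subclass instance with x/y assigned, and plt
    # is matplotlib.pyplot -- both are assumptions made for this sketch):
    #
    #   fig, ax = plt.subplots()
    #   p._attach(ax, allowed_types=["numeric"], log_scale=(True, False))
    #
    # This would raise TypeError if either variable is non-numeric, register
    # the data with the axis unit machinery, and log-scale only the x axis.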
def _add_axis_labels(self, ax, default_x="", default_y=""):
"""Add axis labels from internal variable names if not already existing."""
if not ax.get_xlabel():
ax.set_xlabel(self.variables.get("x", default_x))
if not ax.get_ylabel():
ax.set_ylabel(self.variables.get("y", default_y))
def variable_type(vector, boolean_type="numeric"):
"""Determine whether a vector contains numeric, categorical, or dateime data.
This function differs from the pandas typing API in two ways:
- Python sequences or object-typed PyData objects are considered numeric if
all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
Input data to test.
    boolean_type : 'numeric' or 'categorical'
Type to use for vectors containing only 0s and 1s (and NAs).
Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
Name identifying the type of data in the vector.
"""
# Special-case all-na data, which is always "numeric"
if pd.isna(vector).all():
return "numeric"
# Special-case binary/boolean data, allow caller to determine
# This triggers a numpy warning when vector has strings/objects
# https://github.com/numpy/numpy/issues/6784
# Because we reduce with .all(), we are agnostic about whether the
# comparison returns a scalar or vector, so we will ignore the warning.
# It triggers a separate DeprecationWarning when the vector has datetimes:
# https://github.com/numpy/numpy/issues/13548
# This is considered a bug by numpy and will likely go away.
with warnings.catch_warnings():
warnings.simplefilter(
action='ignore', category=(FutureWarning, DeprecationWarning)
)
if np.isin(vector, [0, 1, np.nan]).all():
return boolean_type
# Defer to positive pandas tests
if pd.api.types.is_numeric_dtype(vector):
return "numeric"
if pd.api.types.is_categorical_dtype(vector):
return "categorical"
if pd.api.types.is_datetime64_dtype(vector):
return "datetime"
# --- If we get to here, we need to check the entries
# Check for a collection where everything is a number
def all_numeric(x):
for x_i in x:
if not isinstance(x_i, Number):
return False
return True
if all_numeric(vector):
return "numeric"
# Check for a collection where everything is a datetime
def all_datetime(x):
for x_i in x:
if not isinstance(x_i, (datetime, np.datetime64)):
return False
return True
if all_datetime(vector):
return "datetime"
# Otherwise, our final fallback is to consider things categorical
return "categorical"
def infer_orient(x=None, y=None, orient=None, require_numeric=True):
"""Determine how the plot should be oriented based on the data.
For historical reasons, the convention is to call a plot "horizontally"
or "vertically" oriented based on the axis representing its dependent
variable. Practically, this is used when determining the axis for
numerical aggregation.
    Parameters
    ----------
x, y : Vector data or None
Positional data vectors for the plot.
orient : string or None
Specified orientation, which must start with "v" or "h" if not None.
require_numeric : bool
If set, raise when the implied dependent variable is not numeric.
Returns
-------
orient : "v" or "h"
Raises
------
ValueError: When `orient` is not None and does not start with "h" or "v"
    TypeError: When dependent variable is not numeric, with `require_numeric`
"""
x_type = None if x is None else variable_type(x)
y_type = None if y is None else variable_type(y)
nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
single_var_warning = "{} orientation ignored with only `{}` specified."
if x is None:
if str(orient).startswith("h"):
warnings.warn(single_var_warning.format("Horizontal", "y"))
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "v"
elif y is None:
if str(orient).startswith("v"):
warnings.warn(single_var_warning.format("Vertical", "x"))
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "h"
elif str(orient).startswith("v"):
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "v"
elif str(orient).startswith("h"):
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "h"
elif orient is not None:
raise ValueError(f"Value for `orient` not understood: {orient}")
elif x_type != "numeric" and y_type == "numeric":
return "v"
elif x_type == "numeric" and y_type != "numeric":
return "h"
elif require_numeric and "numeric" not in (x_type, y_type):
err = "Neither the `x` nor `y` variable appears to be numeric."
raise TypeError(err)
else:
return "v"
def unique_dashes(n):
"""Build an arbitrarily long list of unique dash styles for lines.
Parameters
----------
n : int
Number of unique dash specs to generate.
Returns
-------
dashes : list of strings or tuples
Valid arguments for the ``dashes`` parameter on
:class:`matplotlib.lines.Line2D`. The first spec is a solid
line (``""``), the remainder are sequences of long and short
dashes.
"""
# Start with dash specs that are well distinguishable
dashes = [
"",
(4, 1.5),
(1, 1),
(3, 1.25, 1.5, 1.25),
(5, 1, 1, 1),
]
    # Now programmatically build as many as we need
p = 3
while len(dashes) < n:
# Take combinations of long and short dashes
a = itertools.combinations_with_replacement([3, 1.25], p)
b = itertools.combinations_with_replacement([4, 1], p)
# Interleave the combinations, reversing one of the streams
segment_list = itertools.chain(*zip(
list(a)[1:-1][::-1],
list(b)[1:-1]
))
# Now insert the gaps
for segments in segment_list:
gap = min(segments)
spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
dashes.append(spec)
p += 1
return dashes[:n]
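# For reference, the first few generated specs (a sketch of expected output):
#
#   unique_dashes(3) -> ["", (4, 1.5), (1, 1)]
#
# i.e. a solid line followed by dash patterns from the seed list; specs beyond
# the five seeds are built by the combination loop above.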
def unique_markers(n):
"""Build an arbitrarily long list of unique marker styles for points.
Parameters
----------
n : int
Number of unique marker specs to generate.
Returns
-------
markers : list of string or tuples
Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
All markers will be filled.
"""
# Start with marker specs that are well distinguishable
markers = [
"o",
"X",
(4, 0, 45),
"P",
(4, 0, 0),
(4, 1, 0),
"^",
(4, 1, 45),
"v",
]
# Now generate more from regular polygons of increasing order
s = 5
while len(markers) < n:
a = 360 / (s + 1) / 2
markers.extend([
(s + 1, 1, a),
(s + 1, 0, a),
(s, 1, 0),
(s, 0, 0),
])
s += 1
# Convert to MarkerStyle object, using only exactly what we need
# markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]
return markers[:n]
def categorical_order(vector, order=None):
"""Return a list of unique data values.
    Determine an ordered list of levels in ``vector``.
Parameters
----------
vector : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
        from the ``vector`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
"""
if order is None:
if hasattr(vector, "categories"):
order = vector.categories
else:
try:
order = vector.cat.categories
except (TypeError, AttributeError):
try:
order = vector.unique()
except AttributeError:
order = pd.unique(vector)
if variable_type(vector) == "numeric":
order = np.sort(order)
order = filter(pd.notnull, order)
return list(order)
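# Illustrative expectations for categorical_order (a sketch):
#
#   categorical_order(pd.Series(["b", "a", "b", None]))        -> ["b", "a"]
#   categorical_order(pd.Series([3, 1, 2]))                    -> [1, 2, 3]
#   categorical_order(pd.Series(["b", "a"]), order=["a", "b"]) -> ["a", "b"]
#
# Appearance order is kept for non-numeric data, numeric levels are sorted,
# nulls are dropped from inferred levels, and an explicit ``order`` is
# returned as given.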
| bsd-3-clause |
jeffwdoak/free_energies | free_energies/electronicdos.py | 1 | 14960 | #!/usr/bin/python
# electronicdos.py v0.5 5-16-2012 Jeff Doak jeff.w.doak@gmail.com
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.integrate import quad
from scipy.optimize import fsolve
import sys, subprocess
BOLTZCONST = 8.617e-5 #eV/K
class ElectronicDOS:
"""
Class to calculate equilibrium carrier concentrations, as well as
equilibrium thermodynamic properties of an electronic density of states.
Class constants of ElectronicDOS:
- BOLTZCONST - Boltzmann's Constant (eV/K)
Instance attributes of ElectronicDOS:
- n_atoms - number of atoms of the unit cell from which the DOS was
calculated
- energy - numpy array of energies at which DOS is calculated (eV)
- dos_tot - numpy array of density of spin up and spin down allowed electron
states at each energy in the array energy (# states/eV/atom)
- dos_spin - numpy array of the difference in density between spin up and
spin down states (# states/eV/atom)
- e_min - minimum energy in numpy array energy (eV)
- e_max - maximum energy in numpy array energy (eV)
- e_fermi - zero Kelvin fermi energy for the electronic DOS (eV)
- step_size - energy difference between two consecutive points in the DOSCAR
file (eV)
- vbm - valence band maximum, set to e_fermi for metals (eV)
- cbm - conduction band minimum, set to e_fermi for metals (eV)
- band_gap - band gap around the fermi energy, zero for metals (eV)
- temp - numpy array of temperatures at which to calculate equilibrium
electron chemical potentials, electron concentrations, and hole
concentrations (K)
- mu_e - numpy array of electron chemical potentials calculated at each
temperature in temp (eV)
- num_e - numpy array of equilibrium electron concentrations calculated at
each temperature in temp (# e's/atom)
- num_h - numpy array of equilibrium hole concentrations calculated at each
temperature in temp (# h's/atom)
- E_el - numpy array of electronic energy calculated at each temperature
in temp (eV/atom)
- S_el - numpy array of electronic entropy calculated at each temperature
in temp (kB/atom)
- F_el - numpy array of electronic free energy calculated at each
temperature in temp (eV/atom)
"""
def __init__(self,input_,format=None):
if isinstance(input_,str):
try:
input_ = open(input_,'r')
except IOError:
print "Error reading input file."
print "Program will now exit!"
sys.exit(1)
if isinstance(input_,file):
if format == "ezvasp":
self.read_ezvasp_dos(input_)
else:
self.read_doscar(input_)
nelec = subprocess.Popen("grep NELECT OUTCAR",
shell=True,stdin=None,stdout=subprocess.PIPE).communicate()[0]
self.nelec = int(float(nelec.split()[2]))
self.get_bandgap()
# Calculate finite temperature properties
self.temp = np.linspace(0,2000,21)
self.mu_e = np.zeros_like(self.temp)
self.num_e = np.zeros_like(self.temp)
self.num_h = np.zeros_like(self.temp)
self.E_el = np.zeros_like(self.temp)
self.S_el = np.zeros_like(self.temp)
self.F_el = np.zeros_like(self.temp)
# Calculate E_el_0
self.E_el_0 = None
tol = 1e-5
for i in range(len(self.temp)):
            if self.temp[i] < tol:
self.mu_e[i] = self.e_fermi
self.E_el[i] = 0.0
self.S_el[i] = 0.0
self.num_e[i] = 0.0
self.num_h[i] = 0.0
            elif self.temp[i] > 0.0:
self.mu_e[i] = self.calc_mu_e(self.temp[i])
                if self.E_el_0 is None:
self.E_el_0 = self.calc_E_el(self.mu_e[i],self.temp[i])
self.num_e[i] = self.n(self.mu_e[i],self.temp[i])
self.num_h[i] = self.p(self.mu_e[i],self.temp[i])
self.E_el[i] = (self.calc_E_el(self.mu_e[i],self.temp[i]))
self.S_el[i] = self.calc_S_el(self.mu_e[i],self.temp[i])
self.E_el[1:] = self.E_el[1:] - self.E_el_0
self.F_el = self.E_el - self.temp*BOLTZCONST*self.S_el
def read_doscar(self,input_):
"""
Reads in a doscar file to grab the density of states as a function of
energy. The argument input_ is assumed to be a file object.
"""
self.n_atoms = int(input_.readline().split()[0])
# Discard header information
for i in range(4):
input_.readline()
# Read in Fermi Energy
line = input_.readline().split()
self.e_max = float(line[0])
self.e_min = float(line[1])
self.e_fermi = float(line[3])
energy = []; dos_tot = []; dos_spin = []
for line in input_:
line = line.split()
energy.append(float(line[0]))
if len(line) == 3:
dos_tot.append(float(line[1])) # DOS includes spin up and down
dos_spin.append(0.0)
elif len(line) == 5:
dos_tot.append(float(line[1])+float(line[2]))
dos_spin.append(float(line[1])-float(line[2]))
self.energy = np.array(energy)
#self.dos_tot = np.array(dos_tot)/float(self.n_atoms)
self.dos_tot = np.array(dos_tot)
#self.dos_spin = np.array(dos_spin)/float(self.n_atoms)
self.dos_spin = np.array(dos_spin)
self.dos_spline = UnivariateSpline(self.energy,self.dos_tot)
def read_ezvasp_dos(self,input_):
"""
Reads an ezvasp-formatted dos.out file to get the electronic density of
states. The argument input_ is assumned to be a file object.
"""
nions = subprocess.Popen("grep NIONS OUTCAR",
shell=True,stdin=None,stdout=subprocess.PIPE).communicate()[0]
self.n_atoms = int(float(nions.split()[-1]))
self.e_min = 0.0
line = input_.readline().split()
self.nelec = int(float(line[0]))
self.step_size = float(line[1])
self.scale = float(line[2])
energy = []; dos_tot = []
i = 0
for line in input_:
line = line.split()
dos_tot.append(float(line[0]))
energy.append(float(i)*self.step_size)
i += 1
self.energy = np.array(energy)
self.dos_tot = np.array(dos_tot)
        self.dos_spin = np.zeros_like(self.dos_tot) # Change this for spin-polarized calculations
self.dos_spline = UnivariateSpline(self.energy,self.dos_tot)
self.e_max = self.energy[-1]
# Find the 0 Kelvin 'Fermi Energy' using ATAT's method
ne = 0.0
for i in range(len(self.dos_tot)):
ne += self.dos_tot[i]*self.step_size
e_fermi = self.energy[i]
if ne >= self.nelec:
break
self.e_fermi = e_fermi
def get_bandgap(self):
"""
Finds the band gap of a DOS around the fermi energy.
"""
self.step_size = self.energy[1] - self.energy[0]
i = 0
not_found = True
while not_found:
if self.energy[i] < self.e_fermi and self.dos_tot[i] > 1e-3:
bot = self.energy[i]
elif self.energy[i] > self.e_fermi and self.dos_tot[i] > 1e-3:
top = self.energy[i]
not_found = False
i += 1
if top - bot < 2*self.step_size:
self.vbm = self.cbm = self.e_fermi
self.band_gap = 0.0
else:
self.vbm = bot; self.cbm = top
self.band_gap = top - bot
def shift_energy(self,new_ref):
"""
Change the reference energy for all of the energy attributes.
"""
self.energy = self.energy - new_ref
self.e_min = self.e_min - new_ref
self.e_max = self.e_max - new_ref
self.e_fermi = self.e_fermi - new_ref
self.vbm = self.vbm - new_ref
self.cbm = self.cbm - new_ref
self.mu_e = self.mu_e - new_ref
#def sum_dos(self,weight,start,end,args=None):
def sum_dos(self,weight,start,end,args=None):
"""
Sums the density of states, dos, in the energy range [start,end], weighted
by the function weight, which takes as inputs energy and args.
"""
flag = False
sum = 0.
for i in range(len(self.energy)):
if flag:
sum += self.step_size*self.dos_tot[i]*weight(
self.energy[i],args)
if self.energy[i] > end:
break
elif self.energy[i] >= start:
flag = True
return sum
#def integrate_dos(self,weight,start,end,args=None,threshold=0.1):
def ium_dos(self,weight,start,end,args=None,threshold=0.1):
"""
Takes numpy arrays containing the energy and dos and integrates them over
the range [start,end] with the weighting function weight. Weight should take
        as an argument the integrated energy and a list of other arguments args.
"""
def integrand(x,weight,args):
return self.dos_spline(x)*weight(x,args)
result = quad(
integrand,start,end,args=(weight,args),full_output=1,limit=350)
integral = result[0]
error = result[1]
#if error > integral*threshold:
# print "Numerical integration error is greater than"
# print str(threshold)+" of the integrated value."
# sys.exit(1)
return integral
def n(self,mu_e,T):
"""
Calculate the intrinsic number of conduction electrons per atom at an
electron chemical potential mu_e and temperature T.
"""
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
#n = self.integrate_dos(fermi,self.cbm,self.e_max,args=(mu_e,T))
#n = self.sum_dos(fermi,self.cbm,self.e_max,args=(mu_e,T))
n = self.sum_dos(fermi,mu_e,self.e_max,args=(mu_e,T))
return n
def p(self,mu_e,T):
"""
Calculate the intrinsic number of valence holes per atom at an electron
chemical potential of mu_e and temperature T.
"""
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((mu-x)/(BOLTZCONST*T))+1.)
#p = self.integrate_dos(fermi,self.e_min,self.vbm,args=(mu_e,T))
#p = self.sum_dos(fermi,self.e_min,self.vbm,args=(mu_e,T))
p = self.sum_dos(fermi,self.e_min,mu_e,args=(mu_e,T))
return p
def charge_neut2(self,mu_e,args):
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
T = args
n_sum = self.sum_dos(fermi,self.e_min,self.e_max,args=(mu_e,T))
return self.nelec - n_sum
def charge_neutrality(self,mu_e,args):
"""
Condition for charge neutrality for intrinsic doping in a perfect
        semiconductor. This function should be overridden for a more
complicated case.
"""
T = args # Args could also include atomic chemical potentials.
return self.p(mu_e,T) - self.n(mu_e,T)
def calc_mu_e(self,temp):
"""
Calculate the electron chemical potential at temperature temp using the
condition of charge neutrality.
"""
#mu_e = fsolve(self.charge_neutrality,self.e_fermi,args=(temp))
mu_e = fsolve(self.charge_neut2,self.e_fermi,args=(temp))
return mu_e
def calc_E_el(self,mu_e,T):
"""
Calculate the electronic energy at a temperature T and electron chemical
potential mu_e.
"""
def energy(x,args):
return x
def fermi_energy(x,args):
mu = args[0]; T = args[1]
if x-mu < -30.0*BOLTZCONST*T:
return x
elif x-mu > 30.0*BOLTZCONST*T:
return 0.0
else:
return x/(np.exp((x-mu)/(BOLTZCONST*T))+1.)
#E = self.integrate_dos(fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
#E_0 = self.integrate_dos(
# fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
E = self.sum_dos(fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
#E_0 = self.sum_dos(energy,self.e_min,self.e_fermi,args=None)
return E
def calc_S_el(self,mu_e,T):
"""
Calculate the electronic entropy at an electron chemical potential mu_e
and temperature T.
"""
def weight(x,args):
mu = args[0]; T = args[1]
x = (x - mu)/(BOLTZCONST*T)
f = 1.0/(np.exp(x)+1)
if f > 1e-5 and (1.0 - f) > 1e-5:
return -f*np.log(f)-(1.-f)*np.log(1.-f)
else:
return 0.0
#f = -np.log(np.exp(x)+1)/(np.exp(x)+1)
#f += -np.log(np.exp(-x)+1)/(np.exp(-x)+1)
#return f
#S = self.integrate_dos(weight,self.e_min,self.e_max,args=(mu_e,T))
S = self.sum_dos(weight,self.e_min,self.e_max,args=(mu_e,T))
return S
def fermi_dirac_dist(x,args):
"""
Calculates the Fermi-Dirac distribution for an energy x, temperature
args[0], and electron chemical potential args[1].
"""
T = args[0]; mu = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
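# Quick sanity check (illustrative numbers, not used elsewhere in this script):
# at T = 300 K and mu = 5.0 eV, fermi_dirac_dist(5.0, (300, 5.0)) is exactly 0.5,
# and one k_B*T (~0.026 eV) above mu the occupation falls to 1/(e + 1) ~ 0.27.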
def test2(argv):
doscar = ElectronicDOS(open(str(argv[0]),'r'))
T = 500
#n = doscar.integrate_dos(
# fermi_dirac_dist,doscar.cbm,doscar.e_max,args=(T,doscar.e_fermi))
p = doscar.p(doscar.e_fermi,T)
print p
def test3(argv):
format = None
if len(argv) > 1:
format = str(argv[1])
doscar = ElectronicDOS(open(str(argv[0]),'r'),format)
print doscar.temp
print doscar.num_e
print doscar.num_h
print doscar.E_el
print doscar.S_el
print doscar.F_el
def atat_test(argv):
format = None
if len(argv) > 1:
format = str(argv[1])
doscar = ElectronicDOS(open(str(argv[0]),'r'),format)
print doscar.E_el_0
for i in range(len(doscar.temp)):
print doscar.temp[i],doscar.mu_e[i],doscar.E_el[i],doscar.S_el[i],doscar.F_el[i]
def test1(argv):
import matplotlib.pyplot as plt
doscar = ElectronicDOS(open(str(argv[0]),'r'))
plt.plot(doscar.energy,doscar.dos_tot)
plt.show()
def main(argv):
import matplotlib.pyplot as plt
doscar = open(str(argv[0]))
e_fermi,energy,n_tot,n_spin = read_doscar(doscar)
plt.plot(energy,n_tot)
if len(argv) > 1:
doscar2 = open(str(argv[1]))
e_fermi2,energy2,n_tot2,n_spin2 = read_doscar(doscar2)
plt.plot(energy2,n_tot2)
plt.show()
if __name__ == "__main__":
import sys
#test3(sys.argv[1:])
atat_test(sys.argv[1:])
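# Example invocations (a sketch; the file names are placeholders, not inputs
# shipped with this module). Both forms expect an OUTCAR in the working
# directory, since NELECT/NIONS are grepped from it:
#   python electronicdos.py DOSCAR
#   python electronicdos.py dos.out ezvasp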
| mit |
magne-max/zipline-ja | tests/history/generate_csvs.py | 8 | 5432 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import pandas as pd
from zipline.finance.trading import TradingEnvironment
from zipline.data.us_equity_minutes import BcolzMinuteBarWriter
def generate_daily_test_data(first_day,
last_day,
starting_open,
starting_volume,
multipliers_list,
path):
days = TradingEnvironment.instance().days_in_range(first_day, last_day)
days_count = len(days)
o = np.zeros(days_count, dtype=np.uint32)
h = np.zeros(days_count, dtype=np.uint32)
l = np.zeros(days_count, dtype=np.uint32)
c = np.zeros(days_count, dtype=np.uint32)
v = np.zeros(days_count, dtype=np.uint32)
last_open = starting_open * 1000
last_volume = starting_volume
for idx in range(days_count):
new_open = last_open + round((random.random() * 5), 2)
o[idx] = new_open
h[idx] = new_open + round((random.random() * 10000), 2)
l[idx] = new_open - round((random.random() * 10000), 2)
c[idx] = (h[idx] + l[idx]) / 2
v[idx] = int(last_volume + (random.randrange(-10, 10) * 1e4))
last_open = o[idx]
last_volume = v[idx]
# now deal with multipliers
if len(multipliers_list) > 0:
range_start = 0
for multiplier_info in multipliers_list:
range_end = days.searchsorted(multiplier_info[0])
# dividing by the multiplier because we're going backwards
# and generating the original data that will then be adjusted.
o[range_start:range_end] /= multiplier_info[1]
h[range_start:range_end] /= multiplier_info[1]
l[range_start:range_end] /= multiplier_info[1]
c[range_start:range_end] /= multiplier_info[1]
v[range_start:range_end] *= multiplier_info[1]
range_start = range_end
df = pd.DataFrame({
"open": o,
"high": h,
"low": l,
"close": c,
"volume": v
}, columns=[
"open",
"high",
"low",
"close",
"volume"
], index=days)
df.to_csv(path, index_label="day")
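# Illustrative call (the dates, prices, and split multiplier below are
# placeholders chosen for this sketch, not values used by the test suite):
#
#   generate_daily_test_data(
#       first_day=pd.Timestamp("2014-01-02", tz="UTC"),
#       last_day=pd.Timestamp("2014-12-31", tz="UTC"),
#       starting_open=100.0,
#       starting_volume=1e6,
#       multipliers_list=[(pd.Timestamp("2014-07-01", tz="UTC"), 2.0)],
#       path="test_daily_data.csv",
#   )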
def generate_minute_test_data(first_day,
last_day,
starting_open,
starting_volume,
multipliers_list,
path):
"""
Utility method to generate fake minute-level CSV data.
:param first_day: first trading day
:param last_day: last trading day
:param starting_open: first open value, raw value.
:param starting_volume: first volume value, raw value.
:param multipliers_list: ordered list of pd.Timestamp -> float, one per day
in the range
:param path: path to save the CSV
:return: None
"""
full_minutes = BcolzMinuteBarWriter.full_minutes_for_days(
first_day, last_day)
minutes_count = len(full_minutes)
minutes = TradingEnvironment.instance().minutes_for_days_in_range(
first_day, last_day)
o = np.zeros(minutes_count, dtype=np.uint32)
h = np.zeros(minutes_count, dtype=np.uint32)
l = np.zeros(minutes_count, dtype=np.uint32)
c = np.zeros(minutes_count, dtype=np.uint32)
v = np.zeros(minutes_count, dtype=np.uint32)
last_open = starting_open * 1000
last_volume = starting_volume
for minute in minutes:
# ugly, but works
idx = full_minutes.searchsorted(minute)
new_open = last_open + round((random.random() * 5), 2)
o[idx] = new_open
h[idx] = new_open + round((random.random() * 10000), 2)
l[idx] = new_open - round((random.random() * 10000), 2)
c[idx] = (h[idx] + l[idx]) / 2
v[idx] = int(last_volume + (random.randrange(-10, 10) * 1e4))
last_open = o[idx]
last_volume = v[idx]
# now deal with multipliers
if len(multipliers_list) > 0:
for idx, multiplier_info in enumerate(multipliers_list):
start_idx = idx * 390
end_idx = start_idx + 390
            # dividing by the multiplier because we're going backwards
# and generating the original data that will then be adjusted.
o[start_idx:end_idx] /= multiplier_info[1]
h[start_idx:end_idx] /= multiplier_info[1]
l[start_idx:end_idx] /= multiplier_info[1]
c[start_idx:end_idx] /= multiplier_info[1]
v[start_idx:end_idx] *= multiplier_info[1]
df = pd.DataFrame({
"open": o,
"high": h,
"low": l,
"close": c,
"volume": v
}, columns=[
"open",
"high",
"low",
"close",
"volume"
], index=minutes)
df.to_csv(path, index_label="minute")
| apache-2.0 |
xwolf12/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
DouglasLeeTucker/DECam_PGCM | bin/rawdata_se_objects_split.py | 1 | 5996 | #!/usr/bin/env python
"""
rawdata_se_objects_split.py
Example:
rawdata_se_objects_split.py --help
rawdata_se_objects_split.py --inputFileListFile inputfilelist.csv
--outputFileListFile outputfilelist.csv
--verbose 2
"""
##################################
def main():
import os
import argparse
import time
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--inputFileListFile', help='name of CSV file containing list of input files', default='inputfilelist.csv')
parser.add_argument('--outputFileListFile', help='name of CSV file containing list of filter bands and output files', default='outputfilelist.csv')
parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int)
args = parser.parse_args()
if args.verbose > 0: print args
# Execute method...
status = rawdata_se_objects_split(args)
##################################
# rawdata_se_objects_split
#
# Based on sepBands from y2a1_tertiaries.py
#
def rawdata_se_objects_split(args):
import os
import datetime
import numpy as np
import pandas as pd
from astropy.table import Table
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'rawdata_se_objects_split'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Read in inputFileListFile...
inputFileListFile = args.inputFileListFile
if os.path.isfile(inputFileListFile)==False:
print """Input filelist file %s does not exist...""" % (inputFileListFile)
print """Exiting rawdata_se_objects_split method with return code 1"""
return 1
inputFileListDF = pd.read_csv(inputFileListFile,
header=None,
names=['FILENAME'],
comment='#')
inputFileListDF['FILENAME'] = inputFileListDF['FILENAME'].str.strip()
inputFileListSeries = inputFileListDF['FILENAME']
inputFileList = inputFileListSeries.values.tolist()
if args.verbose>1:
print 'Input file list:'
print inputFileList
# Read in outputFileListFile and convert it to a python
# dictionary, ensuring there are no extraneous white spaces
# in the file names listed...
outputFileListFile = args.outputFileListFile
if os.path.isfile(outputFileListFile)==False:
print """Output filelist file %s does not exist...""" % (outputFileListFile)
print """Exiting rawdata_se_objects_split method with return code 1"""
return 1
outputFileListDF = pd.read_csv(outputFileListFile,
header=None,
names=['BAND','FILENAME'],
index_col='BAND',
comment='#')
outputFileListDF['FILENAME'] = outputFileListDF['FILENAME'].str.strip()
outputFileListSeries = outputFileListDF['FILENAME']
outputFileListDict = outputFileListSeries.to_dict()
# Also, grab the band list from the outputFileListFile series...
bandList = outputFileListSeries.index.values.tolist()
if args.verbose>1:
print 'Output file list dictionary:'
print outputFileListDict
print 'Band list:'
print bandList
# Loop through inputFileList...
firstFile=True
for inputFile in inputFileList:
if args.verbose > 1:
print """Working on input file %s...""" % inputFile
print datetime.datetime.now()
# Read in file...
t = Table.read(inputFile)
# Convert astropy Table to pandas dataframe...
df = t.to_pandas()
# Verify that 'BAND' is a column in the dataframe;
# otherwise, skip...
if 'BAND' not in df.columns:
print """Could not find 'BAND' in header of %s... Skipping""" \
% (inputFile)
del df
continue
# Verify that 'FILENAME' is a column in the dataframe;
# otherwise, skip...
if 'FILENAME' not in df.columns:
print """Could not find 'FILENAME' in header of %s... Skipping""" \
% (inputFile)
del df
continue
# Trim leading and trailing white space from the FILENAME column...
df['FILENAME'] = df['FILENAME'].str.strip()
# Trim leading and trailing white space from the BAND column...
df['BAND'] = df['BAND'].str.strip()
# If this is the first (valid) file, create initial
# output files (empty except for the CSV header)...
if firstFile is True:
for band in bandList:
outputFile = outputFileListDict[band]
# Create a mask with all entries equal to False...
mask = pd.Series(np.zeros(df.BAND.size, dtype=bool))
df[mask].to_csv(outputFile,index=False)
firstFile = False
# Loop through band list, appending the rows from
# each band to the appropriate output file...
for band in bandList:
outputFile = outputFileListDict[band]
mask = (df.BAND == band)
# Faster if we move the "open" to outside the loop?:
with open(outputFile, 'a') as f:
df[mask].to_csv(f, index=False, header=False)
f.close()
# Clean up some space before moving to next file...
del df
#del t
if args.verbose > 1:
print datetime.datetime.now()
if args.verbose>0: print
return 0
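# Illustrative list-file contents (file and band names here are placeholders):
#
#   inputfilelist.csv          outputfilelist.csv
#   -----------------------    ----------------------------
#   se_objects_chunk01.fits    g,rawdata_se_objects_g.csv
#   se_objects_chunk02.fits    r,rawdata_se_objects_r.csv
#                              i,rawdata_se_objects_i.csv
#
# i.e. one input catalog file name per line, and one "BAND,FILENAME" pair per
# line for the outputs; lines starting with '#' are ignored in both files.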
##################################
if __name__ == "__main__":
main()
##################################
| gpl-3.0 |
arabenjamin/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 129 | 43401 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
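    # For reference, the update rules the loop above implements (squared loss,
    # plain averaging; a sketch of the math rather than extra test logic):
    #   w   <- (1 - eta*alpha) * w - eta * (x.w + b - y) * x
    #   b   <- b - eta * decay * (x.w + b - y)
    #   avg <- (i * avg + new_value) / (i + 1)   for both w and b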
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build an very very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
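# Illustrative note: MinMaxScaler (used above) rescales each feature to
# [0, 1] as (x - x.min()) / (x.max() - x.min()), which is what keeps the
# squared-hinge gradients bounded even when raw features are of order 1e300.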
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
foolcage/fooltrader | fooltrader/spiders/america/sp500_spider.py | 1 | 3418 | # -*- coding: utf-8 -*-
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import Selector
from scrapy import signals
from fooltrader.contract.files_contract import get_kdata_path
from fooltrader.utils.utils import index_df_with_time, to_time_str, to_float
class Sp500Spider(scrapy.Spider):
name = "sp500_spider"
def __init__(self, name=None, **kwargs):
super().__init__(name, **kwargs)
self.security_item = {'id': 'index_nasdaq_sp500',
'code': 'SP500',
'name': 'SP500',
'listDate': '1871-01-01',
'timestamp': '1871-01-01',
'exchange': 'nasdaq',
'type': 'index'}
self.df_close = pd.DataFrame()
self.df_pe = pd.DataFrame()
def start_requests(self):
pe_url = 'http://www.multpl.com/table?f=m'
price_url = 'http://www.multpl.com/s-p-500-historical-prices/table/by-month'
yield Request(url=pe_url,
callback=self.download_sp500_pe)
yield Request(url=price_url,
callback=self.download_sp500_price)
def download_sp500_price(self, response):
trs = response.xpath('//*[@id="datatable"]/tr').extract()
price_jsons = []
try:
for tr in trs[1:]:
tds = Selector(text=tr).xpath('//td//text()').extract()
tds = [x.strip() for x in tds if x.strip()]
price_jsons.append({"timestamp": to_time_str(tds[0]),
"close": to_float(tds[1])})
if price_jsons:
self.df_close = self.df_close.append(price_jsons, ignore_index=True)
self.df_close = index_df_with_time(self.df_close)
except Exception as e:
self.logger.exception('error when getting sp500 price url={} error={}'.format(response.url, e))
def download_sp500_pe(self, response):
trs = response.xpath('//*[@id="datatable"]/tr').extract()
price_jsons = []
try:
for tr in trs[1:]:
tds = Selector(text=tr).xpath('//td//text()').extract()
tds = [x.strip() for x in tds if x.strip()]
price_jsons.append({"timestamp": to_time_str(tds[0]),
"pe": to_float(tds[1])})
if price_jsons:
self.df_pe = self.df_pe.append(price_jsons, ignore_index=True)
self.df_pe = index_df_with_time(self.df_pe)
except Exception as e:
self.logger.exception('error when getting sp500 pe url={} error={}'.format(response.url, e))
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(Sp500Spider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
self.df_pe['close'] = self.df_close['close']
self.df_pe['code'] = self.security_item['code']
self.df_pe['securityId'] = self.security_item['id']
self.df_pe['name'] = self.security_item['name']
self.df_pe.to_csv(get_kdata_path(self.security_item), index=False)
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
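# Illustrative sketch of the row parsing used in the two download_* callbacks
# above: each multpl.com table row holds a date cell and a value cell, so
# Selector(text=...).xpath('//td//text()') yields both as strings. The HTML
# snippet below is an assumed example, not real downloaded data.
def _example_parse_row(row='<tr><td>Jan 1, 2018</td><td>2,695.81</td></tr>'):
    cells = Selector(text=row).xpath('//td//text()').extract()
    return [x.strip() for x in cells if x.strip()]  # ['Jan 1, 2018', '2,695.81']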
| mit |
flennerhag/mlens | mlens/ensemble/tests/test_a_sklearn.py | 1 | 7958 | """
Test Scikit-learn
"""
import numpy as np
from mlens.ensemble import SuperLearner, Subsemble, BlendEnsemble, TemporalEnsemble
from mlens.testing.dummy import return_pickled
try:
from sklearn.utils.estimator_checks import check_estimator
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.datasets import load_boston
has_sklearn = True
except ImportError:
has_sklearn = False
if has_sklearn:
X, y = load_boston(True)
estimators = [Lasso(),
GradientBoostingRegressor(),
LinearRegression(),
KNeighborsRegressor(),
SVR(gamma='scale'),
RandomForestRegressor(n_estimators=100),
]
est_prep = {'prep1': estimators,
'prep2': estimators,
'prep3': estimators}
prep_1 = [PCA()]
prep_2 = [PolynomialFeatures(), StandardScaler()]
prep = {'prep1': prep_1,
'prep2': prep_2,
'prep3': []}
def get_ensemble(cls, backend, preprocessing, **kwargs):
"""Get ensemble."""
if preprocessing:
est = est_prep
else:
est = estimators
ens = cls(backend=backend, **kwargs)
ens.add(est, preprocessing)
ens.add(LinearRegression(), meta=True)
return ens
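    # Illustrative note: get_ensemble() builds a two-layer ensemble -- one
    # layer of base regressors (optionally grouped with per-group
    # preprocessing pipelines) topped by a LinearRegression meta learner --
    # and the tests below only vary the ensemble class, the backend
    # (multiprocessing vs. threading) and whether preprocessing is used.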
def test_super_learner_s_m():
"""[SuperLearner] Test scikit-learn comp - mp | np"""
ens = get_ensemble(SuperLearner, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_f_m():
"""[SuperLearner] Test scikit-learn comp - mp | p"""
ens = get_ensemble(SuperLearner, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_s_t():
"""[SuperLearner] Test scikit-learn comp - th | np"""
ens = get_ensemble(SuperLearner, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_f_t():
"""[SuperLearner] Test scikit-learn comp - th | p"""
ens = get_ensemble(SuperLearner, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_s_m():
"""[Subsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(Subsemble, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_f_m():
"""[Subsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(Subsemble, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_s_t():
"""[Subsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(Subsemble, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_f_t():
"""[Subsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(Subsemble, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_s_m():
"""[BlendEnsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(BlendEnsemble, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_f_m():
"""[BlendEnsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(BlendEnsemble, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
    def test_blend_s_t():
"""[BlendEnsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(BlendEnsemble, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
    def test_blend_f_t():
"""[BlendEnsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(BlendEnsemble, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_s_m():
"""[TemporalEnsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(TemporalEnsemble, 'multiprocessing', None, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_f_m():
"""[TemporalEnsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(TemporalEnsemble, 'multiprocessing', prep, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
    def test_temporal_s_t():
"""[TemporalEnsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(TemporalEnsemble, 'threading', None, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
    def test_temporal_f_t():
"""[TemporalEnsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(TemporalEnsemble, 'threading', prep, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
| mit |
paveldedik/thesis | models/models.py | 1 | 30072 | # -*- coding: utf-8 -*-
"""
Evaluation Models
=================
"""
from __future__ import division
from copy import copy
from itertools import izip
from collections import defaultdict
import numpy as np
import pandas as pd
import tools
__all__ = (
'DummyPriorModel',
'EloModel',
'EloResponseTime',
'PFAModel',
'PFAResponseTime',
'PFAExt',
'PFAExtTiming',
'PFAExtStaircase',
'PFAExtSpacing',
'PFAGong',
'PFAGongTiming',
'PFATiming',
)
#: Dictionary of the most commonly used time effect functions in this thesis.
time_effect_funcs = {}
def register_time_effect(name):
"""Registers new time effect functions."""
    def register(time_effect):
        time_effect_funcs[name] = time_effect
        return time_effect
    return register
@register_time_effect('log')
def time_effect_log(t, a=1.8, c=0.123):
return a - c * np.log(t)
@register_time_effect('pow')
def time_effect_div(t, a=2, c=0.2):
return a / (t+1) ** c
@register_time_effect('exp')
def time_effect_exp(t, a=1.6, c=0.01):
return a * np.exp(-c * np.sqrt(t))
def init_time_effect(obj, name, parameters=('a', 'c')):
"""Prepares time effect function based on name. Initializes
the given object with default parameters `a` and `c`.
:param obj: Object to initialize with time effect function.
:param name: Name of the time effect function.
"""
time_effect_fun = time_effect_funcs[name]
defaults = time_effect_fun.func_defaults
a, c = parameters
if getattr(obj, a, None) is None:
setattr(obj, a, defaults[0])
if getattr(obj, c, None) is None:
setattr(obj, c, defaults[1])
def time_effect(t):
a_val, c_val = getattr(obj, a), getattr(obj, c)
return time_effect_fun(t, a_val, c_val)
return time_effect
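# Illustrative sketch of how the registered time effect functions and
# `init_time_effect` work together: the host object supplies (or receives)
# the parameters `a` and `c`, and the returned callable maps elapsed seconds
# to a bonus added to the estimated knowledge before the sigmoid is applied.
# The host class and the value 60 below are assumptions made for this example.
def _example_time_effect_usage():
    class _Host(object):
        a, c = None, None  # filled with the defaults of the chosen function
    host = _Host()
    effect = init_time_effect(host, 'log')  # uses time_effect_log
    return effect(60)  # 1.8 - 0.123 * log(60) ~= 1.30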
class Question(object):
"""Representation of a question."""
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.user_id = kwargs.pop('user_id')
self.place_id = kwargs.pop('place_id')
self.type = kwargs.pop('type')
self.inserted = kwargs.pop('inserted')
self.options = kwargs.pop('options')
class Answer(Question):
"""Answer to a question."""
def __init__(self, **kwargs):
super(Answer, self).__init__(**kwargs)
self.place_answered = kwargs.pop('place_answered')
self.response_time = kwargs.pop('response_time')
self.is_correct = kwargs.pop('is_correct')
class User(object):
"""Returns a user with given ID.
:param user_id: ID of the user.
:type user_id: int
"""
def __init__(self, user_id):
self.id = user_id
self.skill_increments = []
@property
def skill(self):
"""Skill of the user."""
return sum(self.skill_increments)
@property
    def answers_count(self):
        """Number of answers of the user (equal to the number of
        skill increments).
        """
return len(self.skill_increments)
def inc_skill(self, increment):
"""Increments the skill of the user.
:param increment: Increment (or decrement) of the skill.
:type increment: int
"""
self.skill_increments += [increment]
class Place(object):
"""Returns a place with given ID.
:param place_id: ID of the place.
:type place_id: int
"""
def __init__(self, place_id):
self.id = place_id
self.difficulty_increments = []
@property
def difficulty(self):
"""Difficulty of the place."""
return sum(self.difficulty_increments)
@property
    def answers_count(self):
        """Number of answers for the place (equal to the number of
        difficulty increments).
        """
return len(self.difficulty_increments)
def inc_difficulty(self, increment):
"""Increments the difficulty of the place.
:param increment: Increment (or decrement) of the difficulty.
:type increment: int
"""
self.difficulty_increments += [increment]
class Item(object):
"""Item representation.
:param prior: Prior skills of users and difficulties of places.
:type prior: dictionary
:param user_id: ID of the user.
:type user_id: int
:param place_id: ID of the place.
:type place_id: int
"""
def __init__(self, prior, user_id, place_id):
self.prior = prior
self.user_id = user_id
self.place_id = place_id
self.practices = []
self.knowledge_increments = []
@property
def user(self):
"""User answering the item."""
return self.prior.users[self.user_id]
@property
def place(self):
"""Place of the item being asked."""
return self.prior.places[self.place_id]
@property
def knowledge(self):
"""Knowledge of the item by the user."""
return (
(self.user.skill - self.place.difficulty)
+ sum(self.knowledge_increments)
)
@property
def correct(self):
"""List of correct answers."""
return [ans for ans in self.practices if ans.is_correct]
@property
def incorrect(self):
"""List of incorrect answers."""
return [ans for ans in self.practices if not ans.is_correct]
@property
def last_inserted(self):
"""Returns the time of the last answer for this item
or :obj:`None` if the item was never answered before.
"""
if self.practices:
return self.practices[-1].inserted
@property
    def any_incorrect(self):
        """:obj:`True` if at least one of the practiced items
        was answered incorrectly, otherwise :obj:`False`.
"""
return any(not answer.is_correct for answer in self.practices)
    def get_diffs(self, current):
        """Returns a list of previous practices expressed as the number
        of seconds that passed between the *current* practice and all
        the *previous* practices.
        :param current: Datetime of the current practice.
        :type current: datetime.datetime
"""
return [
tools.time_diff(current, prior.inserted)
for prior in self.practices
]
def inc_knowledge(self, increment):
"""Increments the knowledge of the user of the item.
:param increment: Increment (or decrement) of the knowledge.
:type increment: int
"""
self.knowledge_increments += [increment]
def add_practice(self, answer):
"""Registers new practice of the item.
:param answer: Information about the answer.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
if isinstance(answer, pd.Series):
self.practices += [Answer(**answer.to_dict())]
else:
self.practices += [copy(answer)]
class Model(object):
"""Abstract model class."""
ABBR = None
    def respect_guess(self, prediction, options):
        """Updates the prediction with respect to the guessing parameter.
:param prediction: Prediction calculated so far.
:type prediction: float
:param options: Number of options in the multiple-choice question.
:type options: int
"""
if options:
val = 1 / len(options)
return val + (1 - val) * prediction
else:
return prediction
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
raise NotImplementedError()
    def update(self, answer):
        """Performs an update of skills, difficulties or knowledge.
        :param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
raise NotImplementedError()
def train(self, data):
"""Trains the model on given data set.
:param data: Data set on which to train the model.
:type data: :class:`pandas.DataFrame`
"""
raise NotImplementedError()
@classmethod
def split_data(cls, data, ratio=0.7):
"""Classmethod that splits data into training set and test set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
:param ratio: What portion of data to include in the training set
and the test set. :obj:`0.5` means that the data will be
            distributed equally.
:type ratio: float
"""
raise NotImplementedError()
class DummyPriorModel(Model):
"""Dummy model that sets all skills of users and difficulties
of places to zero.
"""
class _User(object):
"""Returns a user with given ID."""
def __init__(self, skill):
self.skill = skill
class _Place(object):
"""Returns a place with given ID."""
def __init__(self, difficulty):
self.difficulty = difficulty
def __init__(self, skill=0.0, difficulty=0.0):
self.users = defaultdict(lambda: self._User(skill))
self.places = defaultdict(lambda: self._Place(difficulty))
def update(self, answer):
pass
def train(self, data):
pass
class EloModel(Model):
"""Predicts correctness of answers using Elo Rating System.
The model is parametrized with `alpha` and `beta`. These parameters
affect the uncertainty function.
"""
ABBR = 'Elo'
def __init__(self, alpha=1, beta=0.05):
self.alpha = alpha
self.beta = beta
self.init_model()
    def init_model(self):
        """Initializes two attributes of the model. Both attributes are
        dictionaries. The first one stores difficulties of places (countries),
        the second one stores global skills of the students.
"""
self.places = tools.keydefaultdict(Place)
self.users = tools.keydefaultdict(User)
self.predictions = {}
    def uncertainty(self, n):
        """Uncertainty function. The purpose is to make each update of
        the model less and less significant as the number of prior
        answers `n` grows.
:param n: Number of user's answers or total answers to a place.
:type n: int
"""
return self.alpha / (1 + self.beta * n)
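    # Worked example with the defaults alpha=1, beta=0.05: the uncertainty is
    # 1.0 for a user's first answer (n=0), about 0.67 after 10 answers and
    # 0.5 after 20 answers, so later answers shift the estimates less.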
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
user = self.users[question.user_id]
place = self.places[question.place_id]
prediction = tools.sigmoid(user.skill - place.difficulty)
return self.respect_guess(prediction, question.options)
def update(self, answer):
"""Updates skills of users and difficulties of places according
to given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series`
"""
user = self.users[answer.user_id]
place = self.places[answer.place_id]
prediction = self.predict(answer)
shift = answer.is_correct - prediction
user.inc_skill(self.uncertainty(user.answers_count) * shift)
place.inc_difficulty(-(self.uncertainty(place.answers_count) * shift))
self.predictions[answer.id] = prediction
def train(self, data):
"""Trains the model on given data set.
:param data: Data set on which to train the model.
:type data: :class:`pandas.DataFrame`
"""
self.init_model()
data = tools.first_answers(data)
data.sort(['inserted']).apply(self.update, axis=1)
@classmethod
def split_data(cls, data, ratio=0.7):
"""Classmethod that splits data into training set and test set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
:param ratio: What portion of data to include in the training set
and the test set. :obj:`0.5` means that the data will be
            distributed equally.
:type ratio: float
"""
data = tools.first_answers(data)
return tools.split_data(data, ratio=ratio)
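# Illustrative sketch of a single Elo update with a hand-made answer; the
# field names follow the attributes read by predict()/update() above, while
# the ids and values themselves are assumptions made only for this example.
def _example_elo_update():
    model = EloModel()
    answer = pd.Series({
        'id': 1, 'user_id': 10, 'place_id': 100,
        'options': [], 'is_correct': True,
    })
    model.update(answer)          # first answer: predicted probability is 0.5
    return model.users[10].skill  # shifted by uncertainty(0) * (1 - 0.5) = 0.5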
class EloResponseTime(EloModel):
"""Extension of the Elo model that takes response time of user
into account.
"""
ABBR = 'Elo/RT'
def __init__(self, *args, **kwargs):
self.zeta = kwargs.pop('zeta', 3)
super(EloResponseTime, self).__init__(*args, **kwargs)
def update(self, answer):
"""Updates skills of users and difficulties of places according
to given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
user = self.users[answer.user_id]
place = self.places[answer.place_id]
prediction = self.predict(answer)
level = tools.automaticity_level(answer.response_time)
prob = (prediction * self.zeta + level) / (self.zeta + 1)
shift = answer.is_correct - prob
user.inc_skill(self.uncertainty(user.answers_count) * shift)
place.inc_difficulty(-(self.uncertainty(place.answers_count) * shift))
self.predictions[answer.id] = prediction
class PFAModel(Model):
"""Standard Performance Factor Analysis.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
"""
ABBR = 'PFA'
def __init__(self, prior=None, gamma=3.4, delta=-0.3):
super(PFAModel, self).__init__()
self.prior = prior or DummyPriorModel()
self.gamma = gamma
self.delta = delta
self.init_model()
def init_model(self):
"""Initializes attribute of the model that stores current
knowledge of places for all students.
"""
self.items = tools.keydefaultdict(
lambda *args: Item(self.prior, *args)
)
self.predictions = {}
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
knowledge = (
item.knowledge +
self.gamma * len(item.correct) +
self.delta * len(item.incorrect)
)
return tools.sigmoid(knowledge)
    def update(self, answer):
        """Performs an update of the current knowledge of a user based on
        the given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
def train(self, data):
"""Trains the model on given data set.
:param data: Data set on which to train the model.
:type data: :class:`pandas.DataFrame`
"""
self.init_model()
data.sort(['inserted']).apply(self.update, axis=1)
@classmethod
    def split_data(cls, data):
"""Classmethod that splits data into training set and test set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
"""
test_set = tools.last_answers(data)
train_set = data[~data['id'].isin(test_set['id'])]
return train_set, test_set
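# Worked example of the standard PFA prediction above: the predicted
# probability is sigmoid(m) with
#     m = knowledge + gamma * (number of correct) + delta * (number of incorrect),
# so with the defaults gamma=3.4, delta=-0.3, zero prior knowledge, two correct
# and one incorrect practice, m = 2 * 3.4 - 0.3 = 6.5 and the prediction is
# sigmoid(6.5), roughly 0.998.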
class PFAExt(PFAModel):
"""PFA model for estimation of current knowledge.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
"""
ABBR = 'PFA/E'
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
prediction = tools.sigmoid(item.knowledge)
return self.respect_guess(prediction, question.options)
    def update(self, answer):
        """Performs an update of the current knowledge of a user based on
        the given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
if answer.is_correct:
item.inc_knowledge(self.gamma * (1 - prediction))
else:
item.inc_knowledge(self.delta * prediction)
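# Illustrative note on the asymmetric PFAExt update above: a correct answer
# adds gamma * (1 - p) to the item knowledge (small when the model already
# expected success), while an incorrect answer adds delta * p, penalizing
# most when the model was confident. For example, with p = 0.9 and the
# defaults gamma = 3.4, delta = -0.3, the shift is +0.34 after a correct
# answer and -0.27 after an incorrect one.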
class PFAResponseTime(PFAExt):
"""An extended version of the PFAExt model which alters student's
knowledge by respecting past response times.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
:param zeta: The significance of response times.
:type zeta: float
"""
ABBR = 'PFA/E/RT'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 1.5)
kwargs.setdefault('delta', -1.4)
self.zeta = kwargs.pop('zeta', 1.9)
super(PFAResponseTime, self).__init__(*args, **kwargs)
    def update(self, answer):
        """Performs an update of the current knowledge of a user based on
        the given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
level = tools.automaticity_level(answer.response_time) / self.zeta
if answer.is_correct:
item.inc_knowledge(self.gamma * (1 - prediction) + level)
else:
item.inc_knowledge(self.delta * prediction + level)
class PFAExtTiming(PFAExt):
    """Alternative version of :class:`PFAExtSpacing` which ignores
    the spacing effect. Only forgetting is considered.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
:param time_effect_fun: Time effect function.
:type time_effect_fun: callable or string
"""
ABBR = 'PFA/E/T'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.3)
kwargs.setdefault('delta', -0.9)
        time_effect = kwargs.pop('time_effect_fun', 'pow')
if isinstance(time_effect, basestring):
self.a, self.c = kwargs.pop('a', None), kwargs.pop('c', None)
self.time_effect = init_time_effect(self, time_effect)
else:
self.time_effect = time_effect
super(PFAExtTiming, self).__init__(*args, **kwargs)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
if item.practices:
seconds = tools.time_diff(question.inserted, item.last_inserted)
time_effect = self.time_effect(seconds)
else:
time_effect = 0
prediction = tools.sigmoid(item.knowledge + time_effect)
return self.respect_guess(prediction, question.options)
class PFAExtStaircase(PFAExtTiming):
    """Alternative version of :class:`PFAExtSpacing` which ignores
    the spacing effect. Only forgetting is considered, given by a
    staircase function.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
:param time_effect_fun: Values for staircase function.
:type time_effect_fun: dict (tuples as keys)
"""
ABBR = 'PFA/E/T staircase'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.5)
kwargs.setdefault('delta', -0.8)
self.staircase = tools.intervaldict(kwargs.pop('staircase'))
self.time_effect = lambda k: self.staircase[k]
super(PFAExtTiming, self).__init__(*args, **kwargs)
class PFAExtSpacing(PFAExtTiming):
"""Extended version of PFA that takes into account the effect of
forgetting and spacing.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param spacing_rate: The significance of the spacing effect. Lower
values make the effect less significant. If the spacing rate
is set to zero, the model is unaware of the spacing effect.
:type spacing_rate: float
:param decay_rate: The significance of the forgetting effect. Higher
values of decay rate make the students forget the item faster
and vice versa.
:type decay_rate: float
"""
ABBR = 'PFA/E/S'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.8)
kwargs.setdefault('delta', -0.7)
self.spacing_rate = kwargs.pop('spacing_rate', 0)
self.decay_rate = kwargs.pop('decay_rate', 0.18)
self.iota = kwargs.pop('iota', 1.5)
super(PFAExtSpacing, self).__init__(*args, **kwargs)
def memory_strength(self, question):
"""Estimates memory strength of an item.
:param question: Asked question.
:type question: :class:`pandas.Series`
"""
item = self.items[question.user_id, question.place_id]
practices = item.get_diffs(question.inserted)
if len(practices) > 0:
return self.iota + tools.memory_strength(
filter(lambda x: x > 0, practices),
spacing_rate=self.spacing_rate,
decay_rate=self.decay_rate,
)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
if item.any_incorrect:
strength = self.memory_strength(question)
else:
strength = 0
prediction = tools.sigmoid(item.knowledge + strength)
return self.respect_guess(prediction, question.options)
class PFAGong(PFAModel):
"""Yue Gong's extended Performance Factor Analysis.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param decay: Decay rate of answers.
:type decay: float
"""
ABBR = 'PFA/G'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.1)
kwargs.setdefault('delta', -0.8)
self.decay = kwargs.pop('decay', 0.8)
super(PFAGong, self).__init__(*args, **kwargs)
def get_weights(self, item, question):
"""Returns weights of previous answers to the given item.
:param item: *Item* (i.e. practiced place by a user).
:type item: :class:`Item`
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
correct_weights = [
ans.is_correct * self.decay ** k for k, ans
in tools.reverse_enumerate(item.practices)
]
incorrect_weights = [
(1 - ans.is_correct) * self.decay ** k for k, ans
in tools.reverse_enumerate(item.practices)
]
return sum(correct_weights), sum(incorrect_weights)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
correct_weight, incorrect_weight = self.get_weights(item, question)
knowledge = (
item.knowledge +
self.gamma * correct_weight +
self.delta * incorrect_weight
)
prediction = tools.sigmoid(knowledge)
return self.respect_guess(prediction, question.options)
    def update(self, answer):
        """Performs an update of the current knowledge of a user based on
        the given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
class PFAGongTiming(PFAGong):
"""Performance Factor Analysis combining some aspects of both
the Yue Gong's PFA and the ACT-R model.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param time_effect_fun: Time effect function.
:type time_effect_fun: callable or string
"""
ABBR = 'PFA/G/T old'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 1.7)
kwargs.setdefault('delta', 0.5)
time_effect = kwargs.pop('time_effect_fun', 'pow')
if isinstance(time_effect, basestring):
self.a, self.c = kwargs.pop('a', None), kwargs.pop('c', None)
self.time_effect = init_time_effect(self, time_effect)
else:
self.time_effect = time_effect
super(PFAGong, self).__init__(*args, **kwargs)
def get_weights(self, item, question):
"""Returns weights of previous answers to the given item.
:param item: *Item* (i.e. practiced place by a user).
:type item: :class:`Item`
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
correct_weights = [
max(ans.is_correct * self.time_effect(diff), 0) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
incorrect_weights = [
(1 - ans.is_correct) * self.time_effect(diff) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
return sum(correct_weights), sum(incorrect_weights)
class PFATiming(PFAGong):
"""Performance Factor Analysis combining some aspects of both
the Yue Gong's PFA and the ACT-R model.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param time_effect_good: Time effect function for correct answers.
:type time_effect_good: callable or string
:param time_effect_bad: Time effect function for wrong answers.
:type time_effect_bad: callable or string
"""
ABBR = 'PFA/G/T'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 1) # these parameters should not be
kwargs.setdefault('delta', 1) # modified, i.e. kept equal to 1
time_effect_good = kwargs.pop('time_effect_good', 'pow')
time_effect_bad = kwargs.pop('time_effect_bad', 'pow')
if isinstance(time_effect_good, basestring):
self.a, self.c = kwargs.pop('a', None), kwargs.pop('c', None)
self.time_effect_good = init_time_effect(
self, time_effect_good, parameters=('a', 'c'))
else:
self.time_effect_good = time_effect_good
if isinstance(time_effect_bad, basestring):
self.b, self.d = kwargs.pop('b', None), kwargs.pop('d', None)
self.time_effect_bad = init_time_effect(
self, time_effect_bad, parameters=('b', 'd'))
else:
self.time_effect_bad = time_effect_bad
super(PFAGong, self).__init__(*args, **kwargs)
def get_weights(self, item, question):
"""Returns weights of previous answers to the given item.
:param item: *Item* (i.e. practiced place by a user).
:type item: :class:`Item`
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
correct_weights = [
ans.is_correct * self.time_effect_good(diff) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
incorrect_weights = [
(1 - ans.is_correct) * self.time_effect_bad(diff) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
return sum(correct_weights), sum(incorrect_weights)
| mit |
gotomypc/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
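# The dashed line for class c is the set of points where the OVA decision
# function is zero: coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
# i.e. x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1], which is what
# line() above evaluates at the two mesh endpoints.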
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
giruenf/GRIPy | app/app_utils.py | 1 | 29345 | import re
import os
import json
import importlib
import timeit
import inspect
import collections
from enum import Enum
from pathlib import Path
import numpy as np
from matplotlib.cm import cmap_d
import wx
import app
import fileio
from classes.om.base.manager import ObjectManager
from app import log
class GripyBitmap(wx.Bitmap):
def __init__(self, path_to_bitmap=None):
if path_to_bitmap is None:
super().__init__()
return
if os.path.exists(path_to_bitmap):
full_file_name = path_to_bitmap
elif os.path.exists(os.path.join(app.ICONS_PATH, \
path_to_bitmap)):
full_file_name = os.path.join(app.ICONS_PATH, path_to_bitmap)
else:
raise Exception('ERROR: Wrong bitmap path [{}, {}].'.format( \
app.ICONS_PATH, path_to_bitmap)
)
super().__init__(full_file_name)
class GripyIcon(wx.Icon):
def __init__(self, path_to_bitmap=None, type_=wx.BITMAP_TYPE_ANY):
# print(PurePath(app.ICONS_PATH, path_to_bitmap), 'r')
if path_to_bitmap is not None:
if Path(path_to_bitmap).exists():
pass
elif Path(app.ICONS_PATH, path_to_bitmap).exists():
path_to_bitmap = Path(app.ICONS_PATH, path_to_bitmap)
else:
raise Exception('ERROR: Wrong bitmap path.')
super().__init__(path_to_bitmap, type_)
def calc_well_time_from_depth(event, well_uid):
OM = ObjectManager()
well = OM.get(well_uid)
vp = None
for log_obj in OM.list('log', well.uid):
if log_obj.datatype == 'Velocity':
vp = log_obj
break
if vp is None:
raise Exception('ERROR [calc_prof_tempo]: Vp log not found.')
index_set = OM.get(vp.index_set_uid)
md = index_set.get_z_axis_indexes_by_type('MD')[0]
#
if md.data[0] != 0.0:
return
owt = [0.0]
#
for idx in range(1, len(md.data)):
        if np.isnan(vp.data[idx - 1]):
raise Exception('ERROR [calc_prof_tempo]: Found np.nan on Vp[{}] '.format(idx - 1))
diff_prof = md.data[idx] - md.data[idx - 1]
value = (float(diff_prof) / vp.data[idx - 1]) * 1000.0 # To milliseconds
value = owt[idx - 1] + value
owt.append(value)
#
owt = np.array(owt)
twt = owt * 2.0
#
print('\nOWT:', owt)
#
owt_index = OM.new('data_index', 0, 'One Way Time', 'TIME', 'ms', data=owt)
OM.add(owt_index, index_set.uid)
#
twt_index = OM.new('data_index', 0, 'Two Way Time', 'TWT', 'ms', data=twt)
OM.add(twt_index, index_set.uid)
#
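# Illustrative note on the loop above: it integrates slowness over depth, so
# each depth step contributes delta_t = (delta_depth / Vp) * 1000 milliseconds
# of one-way time (OWT), and TWT is simply 2 * OWT. For example, a 10 m step
# with Vp = 2000 m/s adds 10 / 2000 * 1000 = 5 ms of OWT (10 ms of TWT).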
def load_segy(event, filename, new_obj_name='', comparators_list=None,
iline_byte=9, xline_byte=21, offset_byte=37, tid='seismic',
datatype='amplitude', parentuid=None):
OM = ObjectManager()
disableAll = wx.WindowDisabler()
wait = wx.BusyInfo("Loading SEG-Y file...")
#
try:
print("\nLoading SEG-Y file...")
segy_file = fileio.segy.SEGYFile(filename)
# segy_file.print_dump()
# """
segy_file.read(comparators_list)
segy_file.organize_3D_data(iline_byte, xline_byte, offset_byte)
#
print('segy_file.traces.shape:', segy_file.traces.shape)
#
#
seis_like_obj = OM.new(tid, segy_file.traces, name=new_obj_name,
datatype=datatype
)
if not OM.add(seis_like_obj, parentuid):
raise Exception('Object was not added. tid={}'.format(tid))
#
#
z_index = OM.new('data_index',
name='Time',
datatype='TWT',
unit='ms',
start=0.0,
step=(segy_file.sample_rate * 1000),
samples=segy_file.number_of_samples
)
OM.add(z_index, seis_like_obj.uid)
#
try:
offset_index = OM.new('data_index',
segy_file.dimensions[2],
name='Offset',
datatype='OFFSET',
unit='m'
)
OM.add(offset_index, seis_like_obj.uid)
next_dim = 2
except Exception as e:
next_dim = 1
#
xline_index = OM.new('data_index',
segy_file.dimensions[1],
name='X Line',
datatype='X_LINE'
)
if OM.add(xline_index, seis_like_obj.uid):
next_dim += 1
#
iline_index = OM.new('data_index',
segy_file.dimensions[0],
name='I Line',
datatype='I_LINE'
)
OM.add(iline_index, seis_like_obj.uid)
#
seis_like_obj._create_data_index_map(
[iline_index.uid],
[xline_index.uid],
[offset_index.uid],
[z_index.uid]
)
print('seis_like_obj.traces.shape:', seis_like_obj.data.shape)
# """
except Exception as e:
raise e
finally:
del wait
del disableAll
#
# TODO: Check for a better option in Python 3.6
#
CallerInfo = collections.namedtuple('CallerInfo',
['object_', 'class_', 'module', 'function_name',
'filename', 'line_number', 'line_code'
]
)
def get_callers_stack():
"""
Based on: https://gist.github.com/techtonik/2151727 with some
changes.
Get a list with caller modules, objects and functions in the stack
with list index 0 being the latest call.
Returns:
list(collections.namedtuple('CallerInfo',
['object_', 'class_', 'module', 'function_name',
'filename', 'line_number', 'line_code']))
"""
ret_list = []
print('app_utils.get_callers_stack')
try:
stack = inspect.stack()
for i in range(1, len(stack)):
fi = stack[i]
module_ = None
obj = fi.frame.f_locals.get('self', None)
class_ = fi.frame.f_locals.get('__class__', None)
if obj:
module_ = inspect.getmodule(obj)
if not class_ and obj:
class_ = obj.__class__
ret_list.append(
CallerInfo(object_=obj, class_=class_, module=module_,
function_name=fi.function, filename=fi.filename,
line_number=fi.lineno, line_code=fi.code_context,
# index=fi.index,
# traceback=traceback, f_locals=fi.frame.f_locals
)
)
if fi.frame.f_locals.get('__name__') == '__main__':
break
except Exception as e:
print(e)
raise
return ret_list
def get_class_full_name(obj):
try:
full_name = obj.__class__.__module__ + "." + obj.__class__.__name__
except Exception as e:
msg = 'ERROR in function app.app_utils.get_class_full_name().'
log.exception(msg)
raise e
return full_name
def get_string_from_function(function_):
if not callable(function_):
msg = 'ERROR: Given input is not a function: {}.'.format(str(function_))
log.error(msg)
raise Exception(msg)
return function_.__module__ + '.' + function_.__name__
def get_function_from_filename(full_filename, function_name):
try:
# print ('\nget_function_from_filename', full_filename, function_name)
if function_name == '<module>':
return None
rel_path = os.path.relpath(full_filename, app.BASE_PATH)
module_rel_path = os.path.splitext(rel_path)[0]
# print (module_rel_path)
module_str = '.'.join(module_rel_path.split(os.path.sep))
# print (module_str)
module_ = importlib.import_module(module_str)
# print (module_, function_name)
function_ = getattr(module_, function_name)
return function_
except:
raise
def get_function_from_string(fullpath_function):
try:
# print ('\nget_function_from_string:', fullpath_function)
module_str = '.'.join(fullpath_function.split('.')[:-1])
function_str = fullpath_function.split('.')[-1]
# print ('importing module:', module_str)
module_ = importlib.import_module(module_str)
# print ('getting function:', function_str, '\n')
function_ = getattr(module_, function_str)
return function_
except Exception as e:
msg = 'ERROR in function app.app_utils.get_function_from_string({}).'.format(fullpath_function)
log.exception(msg)
print(msg)
raise e
class Chronometer(object):
def __init__(self):
self.start_time = timeit.default_timer()
def end(self):
self.total = timeit.default_timer() - self.start_time
return 'Execution in {:0.3f}s'.format(self.total)
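# Minimal usage sketch of Chronometer: create it right before the code to be
# measured and call end() afterwards; the workload below is only a placeholder.
def _example_chronometer_usage():
    chrono = Chronometer()
    sum(range(1000000))   # placeholder workload
    return chrono.end()   # e.g. 'Execution in 0.012s'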
# Phoenix DropTarget code
class DropTarget(wx.DropTarget):
def __init__(self, _test_func, callback=None):
wx.DropTarget.__init__(self)
self.data = wx.CustomDataObject('obj_uid')
self.SetDataObject(self.data)
self._test_func = _test_func
self._callback = callback
def OnDrop(self, x, y):
return True
def OnData(self, x, y, defResult):
obj_uid = self._get_object_uid()
if self._callback:
wx.CallAfter(self._callback, obj_uid)
return defResult
def OnDragOver(self, x, y, defResult):
obj_uid = self._get_object_uid()
if obj_uid:
if self._test_func(obj_uid):
return defResult
return wx.DragNone
def _get_object_uid(self):
if self.GetData():
obj_uid_bytes = self.data.GetData().tobytes()
obj_uid_str = obj_uid_bytes.decode()
if obj_uid_str:
obj_uid = parse_string_to_uid(obj_uid_str)
return obj_uid
return None
class GripyEnum(Enum):
def __repr__(self):
# return '{} object, name: {}, value: {}'.format(self.__class__, self.name, self.value)
return str(self.value)
def __eq__(self, other):
if type(other) is self.__class__:
return self.value is other.value
return self.value is other
def __lt__(self, other):
if type(other) is self.__class__:
return self.value < other.value
return self.value < other
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __gt__(self, other):
if type(other) is self.__class__:
return self.value > other.value
return self.value > other
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
class GripyEnumBitwise(GripyEnum):
def __or__(self, other):
if type(other) is self.__class__:
return self.value | other.value
return self.value | other
def __ror__(self, other):
return self.__or__(other)
class WellPlotState(GripyEnum):
NORMAL_TOOL = 0
SELECTION_TOOL = 1
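# Illustrative note: because GripyEnum overrides the comparison operators to
# work on .value, members can be compared directly with plain numbers, e.g.
# WellPlotState.SELECTION_TOOL > 0 evaluates to True, and
# WellPlotState.NORMAL_TOOL == WellPlotState.NORMAL_TOOL is True as well.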
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL)
)
class GripyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, wx.Point):
return 'wx.Point' + str(obj)
elif isinstance(obj, wx.Size):
return 'wx.Size' + str(obj)
elif isinstance(obj, GripyEnum):
return str(obj.value)
elif callable(obj):
return get_string_from_function(obj)
try:
return str(obj)
except:
            return super(GripyJSONEncoder, self).default(obj)
def clean_path_str(path):
# path = path.replace('\\' ,'/')
    path = path.encode('ascii', 'ignore').decode('ascii')  # drop non-ASCII characters
return path
def write_json_file(py_object, fullfilename):
fullfilename = clean_path_str(fullfilename)
fullfilename = os.path.normpath(fullfilename)
directory = os.path.dirname(fullfilename)
if not os.path.exists(directory):
os.makedirs(directory)
msg = 'App.app_utils.write_json_file has created directory: {}'.format(directory)
# log.debug(msg)
print(msg)
f = open(fullfilename, 'w')
f.write(json.dumps(py_object, indent=4, cls=GripyJSONEncoder))
f.close()
class GripyJSONDecoder(json.JSONDecoder):
def decode(self, s, _w=WHITESPACE.match):
self.scan_once = gripy_make_scanner(self)
return super(GripyJSONDecoder, self).decode(s, _w)
def gripy_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
# encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
if string[idx:idx + 10] == '"wx.Point(':
return GripyJSONParser((string, idx + 10), _scan_once, wx.Point)
elif string[idx:idx + 9] == '"wx.Size(':
return GripyJSONParser((string, idx + 9), _scan_once, wx.Size)
return parse_string(string, idx + 1, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), strict,
_scan_once, object_hook, object_pairs_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
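# Helper for the scanner above: consumes the integer components between the
# opening '(' and the closing ')"', and returns an instance of the given wx
# class (wx.Point or wx.Size) together with the index just past the quote.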
def GripyJSONParser(s_and_end, scan_once, _class, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError("Expecting object {}, {}".format(s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ')' and s[end:end + 1] == '"':
end += 1
break
elif nextchar != ',':
raise ValueError("Expecting ',' delimiter {}, {}".format(s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return _class(int(values[0]), int(values[1])), end
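# Illustrative sketch of the intended round trip through the encoder/decoder
# above; the helper name is hypothetical and nothing calls it.
def _demo_wx_point_roundtrip():
    encoded = json.dumps({'pos': wx.Point(10, 20)}, cls=GripyJSONEncoder)
    # e.g. '{"pos": "wx.Point(10, 20)"}' with wxPython Phoenix
    decoded = json.loads(encoded, cls=GripyJSONDecoder)
    # decoded['pos'] is a wx.Point again
    return decoded['pos']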
def read_json_file(full_filename):
    with open(full_filename, 'r') as json_file:
        state = json.load(json_file, cls=GripyJSONDecoder)
    return state
def parse_string_to_uid(obj_uid_string):
"""
Parse a uid String (which may contains non uid characters like " and \) to
a uid tuple in a format (tid, oid).
Parameters
----------
obj_uid_string : str
The uid String.
Returns
-------
tuple
A pair (tid, oid) which can be a Gripy object identifier.
"""
try:
# print ('parse_string_to_uid:', obj_uid_string)
left_index = obj_uid_string.find('(')
right_index = obj_uid_string.rfind(')')
if left_index == -1 or right_index == -1:
return None
elif right_index < left_index:
return None
obj_uid_string = obj_uid_string[left_index + 1:right_index]
tid, oid = obj_uid_string.split(',')
tid = tid.strip('\'\" ')
oid = int(oid.strip('\'\" '))
return tid, oid
except:
raise
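# Example: parse_string_to_uid('("log", 2)') returns ('log', 2); a string
# without a '(...)' pair returns None.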
def get_wx_colour_from_seq_string(seq_str):
# tuple or list
if seq_str.startswith('(') or seq_str.startswith('['):
seq_str = seq_str[1:-1]
val = tuple([int(c.strip()) for c in seq_str.split(',')])
color = wx.Colour(val)
print('_get_wx_colour:', color,
color.GetAsString(wx.C2S_HTML_SYNTAX))
return color
return None
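# Example: get_wx_colour_from_seq_string('(255, 128, 0)') builds a wx.Colour
# from the three components; strings not starting with '(' or '[' yield None.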
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
"""
# MPL 1.4/1.5 COLORS
MPL_CATS_CMAPS = [('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
"""
# MPL 2.0 COLORS
MPL_CATS_CMAPS = [
('Perceptually Uniform Sequential',
['viridis', 'inferno', 'plasma', 'magma']
),
('Sequential',
['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys',
'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'
]
),
('Sequential (2)',
['afmhot', 'autumn', 'bone', 'cool', 'copper', 'gist_heat',
'gray', 'hot', 'pink', 'spring', 'summer', 'winter'
]
),
('Diverging',
['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic'
]
),
('Qualitative',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1',
'Set2', 'Set3', 'Vega10', 'Vega20', 'Vega20b', 'Vega20c'
]
),
('Miscellaneous',
['gist_earth', 'terrain', 'ocean', 'gist_stern', 'brg',
'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv',
'flag', 'prism'
]
)
]
# MPL_COLORMAPS = [value for (key, values) in MPL_CATS_CMAPS for value in values]
MPL_COLORMAPS = sorted(cmap_d)
"""
MPL_COLORMAPS = ['Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r',
'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r',
'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r',
'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r',
'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r',
'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBu_r',
'PuBuGn', 'PuBuGn_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r',
'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r',
'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r',
'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r',
'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Vega10', 'Vega10_r',
'Vega20', 'Vega20_r', 'Vega20b', 'Vega20b_r', 'Vega20c', 'Vega20c_r',
'Wistia', 'Wistia_r', 'YlGn', 'YlGn_r', 'YlGnBu', 'YlGnBu_r',
'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r',
'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r',
'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r',
'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r',
'cubehelix', 'cubehelix_r', 'flag', 'flag_r',
'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r',
'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r',
'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r',
'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r',
'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r',
'inferno', 'inferno_r', 'jet', 'jet_r', 'magma', 'magma_r',
'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r',
'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r',
'rainbow', 'rainbow_r', 'seismic', 'seismic_r',
'spectral', 'spectral_r', 'spring', 'spring_r',
'summer', 'summer_r', 'terrain', 'terrain_r',
'viridis', 'viridis_r', 'winter', 'winter_r']
"""
###############################################################################
###############################################################################
MPL_COLORS = collections.OrderedDict()
MPL_COLORS['Black'] = None
MPL_COLORS['Maroon'] = None
MPL_COLORS['Green'] = wx.Colour(0, 100, 0) # Dark Green
MPL_COLORS['Olive'] = wx.Colour(128, 128, 0)
MPL_COLORS['Navy'] = None
MPL_COLORS['Purple'] = None
MPL_COLORS['Teal'] = wx.Colour(0, 128, 128)
MPL_COLORS['Gray'] = None
MPL_COLORS['Silver'] = wx.Colour(192, 192, 192)
MPL_COLORS['Red'] = None
MPL_COLORS['Lime'] = wx.Colour(0, 255, 0) # Green
MPL_COLORS['Yellow'] = None
MPL_COLORS['Blue'] = None
MPL_COLORS['Fuchsia'] = wx.Colour(255, 0, 255)
MPL_COLORS['Aqua'] = wx.Colour(0, 255, 255)
MPL_COLORS['White'] = None
MPL_COLORS['SkyBlue'] = wx.Colour(135, 206, 235)
MPL_COLORS['LightGray'] = wx.Colour(211, 211, 211)
MPL_COLORS['DarkGray'] = wx.Colour(169, 169, 169)
MPL_COLORS['SlateGray'] = wx.Colour(112, 128, 144)
MPL_COLORS['DimGray'] = wx.Colour(105, 105, 105)
MPL_COLORS['BlueViolet'] = wx.Colour(138, 43, 226)
MPL_COLORS['DarkViolet'] = wx.Colour(148, 0, 211)
MPL_COLORS['Magenta'] = None
MPL_COLORS['DeepPink'] = wx.Colour(255, 20, 147)
MPL_COLORS['Brown'] = None
MPL_COLORS['Crimson'] = wx.Colour(220, 20, 60)
MPL_COLORS['Firebrick'] = None
MPL_COLORS['DarkRed'] = wx.Colour(139, 0, 0)
MPL_COLORS['DarkSlateGray'] = wx.Colour(47, 79, 79)
MPL_COLORS['DarkSlateBlue'] = wx.Colour(72, 61, 139)
MPL_COLORS['Wheat'] = None
MPL_COLORS['BurlyWood'] = wx.Colour(222, 184, 135)
MPL_COLORS['Tan'] = None
MPL_COLORS['Gold'] = None
MPL_COLORS['Orange'] = None
MPL_COLORS['DarkOrange'] = wx.Colour(255, 140, 0)
MPL_COLORS['Coral'] = None
MPL_COLORS['DarkKhaki'] = wx.Colour(189, 183, 107)
MPL_COLORS['GoldenRod'] = None
MPL_COLORS['DarkGoldenrod'] = wx.Colour(184, 134, 11)
MPL_COLORS['Chocolate'] = wx.Colour(210, 105, 30)
MPL_COLORS['Sienna'] = None
MPL_COLORS['SaddleBrown'] = wx.Colour(139, 69, 19)
MPL_COLORS['GreenYellow'] = wx.Colour(173, 255, 47)
MPL_COLORS['Chartreuse'] = wx.Colour(127, 255, 0)
MPL_COLORS['SpringGreen'] = wx.Colour(0, 255, 127)
MPL_COLORS['MediumSpringGreen'] = wx.Colour(0, 250, 154)
MPL_COLORS['MediumAquamarine'] = wx.Colour(102, 205, 170)
MPL_COLORS['LimeGreen'] = wx.Colour(50, 205, 50)
MPL_COLORS['LightSeaGreen'] = wx.Colour(32, 178, 170)
MPL_COLORS['MediumSeaGreen'] = wx.Colour(60, 179, 113)
MPL_COLORS['DarkSeaGreen'] = wx.Colour(143, 188, 143)
MPL_COLORS['SeaGreen'] = wx.Colour(46, 139, 87)
MPL_COLORS['ForestGreen'] = wx.Colour(34, 139, 34)
MPL_COLORS['DarkOliveGreen'] = wx.Colour(85, 107, 47)
MPL_COLORS['DarkGreen'] = wx.Colour(1, 50, 32)
MPL_COLORS['LightCyan'] = wx.Colour(224, 255, 255)
MPL_COLORS['Thistle'] = None
MPL_COLORS['PowderBlue'] = wx.Colour(176, 224, 230)
MPL_COLORS['LightSteelBlue'] = wx.Colour(176, 196, 222)
MPL_COLORS['LightSkyBlue'] = wx.Colour(135, 206, 250)
MPL_COLORS['MediumTurquoise'] = wx.Colour(72, 209, 204)
MPL_COLORS['Turquoise'] = None
MPL_COLORS['DarkTurquoise'] = wx.Colour(0, 206, 209)
MPL_COLORS['DeepSkyBlue'] = wx.Colour(0, 191, 255)
MPL_COLORS['DodgerBlue'] = wx.Colour(30, 144, 255)
MPL_COLORS['CornflowerBlue'] = wx.Colour(100, 149, 237)
MPL_COLORS['CadetBlue'] = wx.Colour(95, 158, 160)
MPL_COLORS['DarkCyan'] = wx.Colour(0, 139, 139)
MPL_COLORS['SteelBlue'] = wx.Colour(70, 130, 180)
MPL_COLORS['RoyalBlue'] = wx.Colour(65, 105, 225)
MPL_COLORS['SlateBlue'] = wx.Colour(106, 90, 205)
MPL_COLORS['DarkBlue'] = wx.Colour(0, 0, 139)
MPL_COLORS['MediumBlue'] = wx.Colour(0, 0, 205)
MPL_COLORS['SandyBrown'] = wx.Colour(244, 164, 96)
MPL_COLORS['DarkSalmon'] = wx.Colour(233, 150, 122)
MPL_COLORS['Salmon'] = None
MPL_COLORS['Tomato'] = wx.Colour(255, 99, 71)
MPL_COLORS['Violet'] = wx.Colour(238, 130, 238)
MPL_COLORS['HotPink'] = wx.Colour(255, 105, 180)
MPL_COLORS['RosyBrown'] = wx.Colour(188, 143, 143)
MPL_COLORS['MediumVioletRed'] = wx.Colour(199, 21, 133)
MPL_COLORS['DarkMagenta'] = wx.Colour(139, 0, 139)
MPL_COLORS['DarkOrchid'] = wx.Colour(153, 50, 204)
MPL_COLORS['Indigo'] = wx.Colour(75, 0, 130)
MPL_COLORS['MidnightBlue'] = wx.Colour(25, 25, 112)
MPL_COLORS['MediumSlateBlue'] = wx.Colour(123, 104, 238)
MPL_COLORS['MediumPurple'] = wx.Colour(147, 112, 219)
MPL_COLORS['MediumOrchid'] = wx.Colour(186, 85, 211)
MPL_COLORS = collections.OrderedDict(sorted(MPL_COLORS.items()))
###############################################################################
###############################################################################
# Based on https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
# 10/September/2019 - Adriano Santana
MPL_LINESTYLES = collections.OrderedDict()
MPL_LINESTYLES['Solid'] = (0, ())
MPL_LINESTYLES['Dotted'] = (0, (1, 1))
MPL_LINESTYLES['Loosely dotted'] = (0, (1, 10))
MPL_LINESTYLES['Densely dotted'] = (0, (1, 1))
MPL_LINESTYLES['Dashed'] = (0, (5, 5))
MPL_LINESTYLES['Loosely dashed'] = (0, (5, 10))
MPL_LINESTYLES['Densely dashed'] = (0, (5, 1))
MPL_LINESTYLES['Dashdotted'] = (0, (3, 5, 1, 5))
MPL_LINESTYLES['Loosely dashdotted'] = (0, (3, 10, 1, 10))
MPL_LINESTYLES['Densely dashdotted'] = (0, (3, 1, 1, 1))
MPL_LINESTYLES['Dashdotdotted'] = (0, (3, 5, 1, 5, 1, 5))
MPL_LINESTYLES['Loosely dashdotdotted'] = (0, (3, 10, 1, 10, 1, 10))
MPL_LINESTYLES['Densely dashdotdotted'] = (0, (3, 1, 1, 1, 1, 1))
| apache-2.0 |
DEVELByte/incubator-airflow | airflow/www/views.py | 2 | 91943 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.utils import old_div
from past.builtins import basestring, unicode
import os
import pkg_resources
import socket
import importlib
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
from itertools import chain, product
import json
from lxml import html
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template, make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
import jinja2
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.logging import LoggingMixin
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils import logging as log_utils
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.configuration import AirflowConfigException
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
DEFAULT_SENSITIVE_VARIABLE_FIELDS = (
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'access_token',
)
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
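# Walks a task (or list of tasks), descending into SubDagOperators, and fills
# task_ids, dag_ids and the task_id -> dag mapping in place.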
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def should_hide_value_for_key(key_name):
return any(s in key_name for s in DEFAULT_SENSITIVE_VARIABLE_FIELDS) \
and conf.getboolean('admin', 'hide_sensitive_variable_fields')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
            return redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
def task_stats(self):
task_ids = []
dag_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
if not dag.is_subdag:
dag_ids.append(dag.dag_id)
TI = models.TaskInstance
DagRun = models.DagRun
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.filter(DagRun.state != State.RUNNING)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(DagRun.state == State.RUNNING)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
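        # Count task instances per (dag_id, state) across the combined set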
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
m = importlib.import_module(dag.module_name)
code = inspect.getsource(m)
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/sandbox')
@login_required
def sandbox(self):
title = "Sandbox Suggested Configuration"
cfg_loc = conf.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
log_loaded = False
if os.path.exists(loc):
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
log_loaded = True
except:
log = "*** Failed to load local log file: {0}.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{host}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
response.raise_for_status()
log += '\n' + response.text
log_loaded = True
except:
log += "*** Failed to fetch log file from worker.\n".format(
**locals())
if not log_loaded:
# load remote logs
remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_log = os.path.join(remote_log_base, log_relative)
log += '\n*** Reading remote logs...\n'
# S3
if remote_log.startswith('s3:/'):
log += log_utils.S3Log().read(remote_log, return_error=True)
# GCS
elif remote_log.startswith('gs:/'):
log += log_utils.GCSLog().read(remote_log, return_error=True)
# unsupported
elif remote_log:
log += '*** Unsupported remote log location.'
session.commit()
session.close()
if PY2 and not isinstance(log, unicode):
log = log.decode('utf-8')
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
.format(
"- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
MAX_PERIODS = 1000
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
dag_ids = [dag_id]
task_id_to_dag = {
task_id: dag
}
end_date = ((dag.latest_execution_date or datetime.now())
if future else execution_date)
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if recursive:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
if downstream:
relatives = task.get_flat_relatives(upstream=False)
task_ids += [t.task_id for t in relatives]
if recursive:
recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)
if upstream:
            relatives = task.get_flat_relatives(upstream=True)
task_ids += [t.task_id for t in relatives]
if recursive:
recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)
TI = models.TaskInstance
if dag.schedule_interval == '@once':
dates = [start_date]
else:
dates = dag.date_range(start_date, end_date=end_date)
tis = session.query(TI).filter(
TI.dag_id.in_(dag_ids),
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids)).all()
tis_to_change = session.query(TI).filter(
TI.dag_id.in_(dag_ids),
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids),
TI.state != State.SUCCESS).all()
tasks = list(product(task_ids, dates))
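        # (task_id, execution_date) pairs with no existing task instance;
        # when confirmed, these are inserted directly in the SUCCESS state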
tis_to_create = list(
set(tasks) -
set([(ti.task_id, ti.execution_date) for ti in tis]))
tis_all_altered = list(chain(
[(ti.task_id, ti.execution_date) for ti in tis_to_change],
tis_to_create))
if len(tis_all_altered) > MAX_PERIODS:
flash("Too many tasks at once (>{0})".format(
MAX_PERIODS), 'error')
return redirect(origin)
if confirmed:
for ti in tis_to_change:
ti.state = State.SUCCESS
session.commit()
for task_id, task_execution_date in tis_to_create:
ti = TI(
task=task_id_to_dag[task_id].get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(tis_all_altered)))
return redirect(origin)
else:
if not tis_all_altered:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id, task_execution_date in tis_all_altered:
tis.append(TI(
task=task_id_to_dag[task_id].get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id==dag.dag_id,
DR.execution_date<=base_date,
DR.execution_date>=min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if isinstance(tid, dict) and tid.get("state") == State.RUNNING:
d = datetime.now() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
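        # Collect unique upstream edges by walking back from the DAG roots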
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=600, width="1200")
for task in dag.tasks:
y = []
x = []
cum_y = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(float(ti.duration) / (60*60))
fails = session.query(models.TaskFail).filter_by(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=ti.execution_date).all()
fails_total = sum([f.duration for f in fails])
cum_y.append(float(ti.duration + fails_total) / (60*60))
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
cum_chart.add_serie(name=task.task_id, x=x, y=cum_y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildhtml()
cum_chart.buildhtml()
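        # Append a jQuery 'chartload' trigger to the generated nvd3 script,
        # just before its closing '});'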
cum_chart_body = html.document_fromstring(str(cum_chart)).find('body')
cum_chart_script = cum_chart_body.find('script')
s_index = cum_chart_script.text.rfind('});')
cum_chart_script.text = cum_chart_script.text[:s_index]\
+ "$( document ).trigger('chartload')"\
+ cum_chart_script.text[s_index:]
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart,
cum_chart=html.tostring(cum_chart_body)
)
@expose('/tries')
@login_required
@wwwutils.action_logging
def tries(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=600, width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildhtml()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval:
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = old_div((ti.end_date - ts).total_seconds(), 60*60)
x.append(dttm)
y.append(secs)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart,
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused')
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else datetime.now()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state:ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
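        # A hypothetical airflow.cfg snippet that would enable this filtering
        # (option names are assumed from the conf lookups used here):
        #   [webserver]
        #   filter_by_owner = True
        #   owner_mode = ldapgroup   # or "user"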
# read orm_dags from the db
qry = session.query(DM)
qry_fltr = []
if do_filter and owner_mode == 'ldapgroup':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners.in_(current_user.ldap_groups)
).all()
elif do_filter and owner_mode == 'user':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
).all()
else:
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active
).all()
orm_dags = {dag.dag_id: dag for dag in qry_fltr}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
# get a list of all non-subdag dags visible to everyone
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
all_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
            'A dictionary of {"key": "value"} pairs that defines the default '
            'values for the templated fields (parameters). '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
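    # e.g. chart_mapping['stacked_area'] == 'stackedAreaChart'; the stored chart_type
    # is presumably mapped to the matching nvd3 chart constructor when the chart renders.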
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if should_hide_value_for_key(model.key):
return Markup('*' * 8)
return getattr(model, name)
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
def action_varexport(self, ids):
V = models.Variable
session = settings.Session()
qry = session.query(V).filter(V.id.in_(ids)).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
page_size = 20
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
def action_new_delete(self, ids):
session = settings.Session()
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun)\
.filter(models.DagRun.id.in_(ids))\
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
models.DagStat.set_dirty(row.dag_id, session=session)
dirty_ids.append(row.dag_id)
models.DagStat.clean_dirty(dirty_ids, session=session)
session.close()
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
models.DagStat.clean_dirty(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
"""
As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.delete_task_instances(ids)
else:
super(TaskInstanceModelView, self).action_delete(ids)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
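            # each id is a comma-separated composite key supplied by the list action;
            # an illustrative value: "my_task,my_dag,2016-01-01 00:00:00"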
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@provide_session
def delete_task_instances(self, ids, session=None):
try:
TI = models.TaskInstance
count = 0
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
count += session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).delete()
session.commit()
flash("{count} task instances were deleted".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to delete', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: the form's elements get rendered
    # and the results are stored in the extra field as json. All of these
    # need to be prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file
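    # For example (connection values below are purely illustrative), saving a JDBC
    # connection through this form would serialize roughly to:
    #   extra = '{"extra__jdbc__drv_path": "/opt/drivers/foo.jar",
    #             "extra__jdbc__drv_clsname": "com.example.Driver"}'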
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("airflow")[0].version
except Exception as e:
airflow_version = None
self.logger.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
self.logger.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
woutdenolf/spectrocrunch | spectrocrunch/visualization/tests/test_scene.py | 1 | 2667 | # -*- coding: utf-8 -*-
import unittest
import matplotlib.pyplot as plt
import numpy as np
from .. import scene
from ...patch.pint import ureg
class test_scene(unittest.TestCase):
def test_images(self):
n0, n1 = 5, 10
img = np.arange(n0 * n1).reshape(n0, n1)
unit0 = ureg.mm
unit1 = ureg.micrometer
s1 = scene.Scene(unit0=unit0, unit1=unit1)
s2 = scene.Scene(unit0=unit0, unit1=unit1)
s2.transpose(True)
# s2.flipx(increasing=True)
s2.axlabels = ["dim0", "dim1"]
s2.cmap = plt.get_cmap("gray")
o1 = scene.Image(
img, lim0=s1.q0([8, 8 + n0 - 1]), lim1=s1.q1([10 + n1 - 1, 10])
)
s1.register(o1)
s2.register(o1)
p0 = sorted(o1.datarange(0, border=False))
p1 = sorted(o1.datarange(1, border=False))
o = scene.Polyline([p0[0], p0[1], p0[1], p0[0]], [p1[0], p1[0], p1[1], p1[1]])
s1.register(o)
s2.register(o)
o.set_setting("scatter", True)
o2 = scene.Image(
img, lim0=s1.q0([-2, -2 + n0 - 1]), lim1=s1.q1([-1, -1 + n1 - 1])
)
s1.register(o2)
s2.register(o2)
o.set_setting("scatter", True)
p0 = sorted(o2.datarange(0, border=False))
p1 = sorted(o2.datarange(1, border=False))
o = scene.Text(
[p0[0], p0[1], p0[1], p0[0]],
[p1[0], p1[0], p1[1], p1[1]],
labels=[1, 2, 3, 4],
)
s1.register(o)
s2.register(o)
f, ax = plt.subplots()
s1.setaxes(ax)
f, ax = plt.subplots()
s2.setaxes(ax)
# Update scene 1
s1.updateview()
# Shift image, axes scaling and update scene 2
o1.lim[0] = s1.q0([9, 9 + n0 - 1])
s2.setdatarange(0, s1.q0([0, 1]))
s2.setdatarange(1, s1.q1([0, 1]))
s2.updateview()
# plt.pause(0.01)
# Update scene 1
s1.updateview()
# Reset axes of scene 1
f, ax = plt.subplots()
s1.setaxes(ax)
# Shift image, axes offset, different normalization and update scene 1
o1.lim[0] = s1.q0([9, 9 + n0 - 1])
s1.set_settings({"cnorm": "power", "cnormargs": (0.1,)})
s1.updateview()
# plt.pause(0.01)
# plt.show()
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_scene("test_images"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
| mit |
dato-code/SFrame | oss_src/unity/python/sframe/test/test_sarray.py | 5 | 120681 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from ..data_structures.sarray import SArray
from ..util.timezone import GMT
from . import util
import binascii
import pandas as pd
import numpy as np
import unittest
import random
import datetime as dt
import copy
import os
import math
import shutil
import array
import time
import itertools
import warnings
import functools
import tempfile
import sys
import six
from nose.tools import nottest
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArrayTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.bool_data = [x % 2 == 0 for x in range(10)]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0)),None]
self.datetime_data2 = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111).replace(tzinfo=GMT(0.0)),None]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["abc", "def", "hello", "world", "pika", "chu", "hello", "world"]
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def __test_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype(), _type)
self.assertEqual(len(_sarray), len(_data))
self.assertSequenceEqual(list(_sarray.head(_sarray.size())), _data)
def __test_almost_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype(), _type)
self.assertEqual(len(_sarray), len(_data))
l = list(_sarray)
for i in range(len(l)):
if type(l[i]) in (list, array.array):
for j in range(len(l[i])):
self.assertAlmostEqual(l[i][j], _data[i][j])
else:
self.assertAlmostEqual(l[i], _data[i])
def __test_creation(self, data, dtype, expected):
"""
        Create an SArray from data with the given dtype, and test that it
        equals the expected values.
"""
s = SArray(data, dtype)
self.__test_equal(s, expected, dtype)
s = SArray(pd.Series(data), dtype)
self.__test_equal(s, expected, dtype)
def __test_creation_type_inference(self, data, expected_dtype, expected):
"""
        Create an SArray from data without specifying a dtype, and test that
        the inferred dtype and values equal the expected ones.
"""
s = SArray(data)
self.__test_equal(s, expected, expected_dtype)
s = SArray(pd.Series(data))
self.__test_equal(s, expected, expected_dtype)
def test_creation(self):
self.__test_creation(self.int_data, int, self.int_data)
self.__test_creation(self.int_data, float, [float(x) for x in self.int_data])
self.__test_creation(self.int_data, str, [str(x) for x in self.int_data])
self.__test_creation(self.float_data, float, self.float_data)
self.assertRaises(TypeError, self.__test_creation, [self.float_data, int])
self.__test_creation(self.string_data, str, self.string_data)
self.assertRaises(TypeError, self.__test_creation, [self.string_data, int])
self.assertRaises(TypeError, self.__test_creation, [self.string_data, float])
expected_output = [chr(x) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(SArray(self.url, str), expected_output, str)
self.__test_creation(self.vec_data, array.array, self.vec_data)
self.__test_creation(self.list_data, list, self.list_data)
self.__test_creation(self.dict_data, dict, self.dict_data)
# test with type inference
self.__test_creation_type_inference(self.int_data, int, self.int_data)
self.__test_creation_type_inference(self.float_data, float, self.float_data)
self.__test_creation_type_inference(self.bool_data, int, [int(x) for x in self.bool_data])
self.__test_creation_type_inference(self.string_data, str, self.string_data)
self.__test_creation_type_inference(self.vec_data, array.array, self.vec_data)
self.__test_creation_type_inference([np.bool_(True),np.bool_(False)],int,[1,0])
self.__test_creation((1,2,3,4), int, [1,2,3,4])
def test_list_with_none_creation(self):
tlist=[[2,3,4],[5,6],[4,5,10,None]]
g=SArray(tlist)
self.assertEqual(len(g), len(tlist))
for i in range(len(tlist)):
self.assertEqual(g[i], tlist[i])
def test_list_with_array_creation(self):
import array
t = array.array('d',[1.1,2,3,4,5.5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype(), float)
glist = list(g)
for i in range(len(glist)):
self.assertAlmostEqual(glist[i], t[i])
t = array.array('i',[1,2,3,4,5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype(), int)
glist = list(g)
for i in range(len(glist)):
self.assertEqual(glist[i], t[i])
def test_in(self):
sint = SArray(self.int_data, int)
self.assertTrue(5 in sint)
self.assertFalse(20 in sint)
sstr = SArray(self.string_data, str)
self.assertTrue("abc" in sstr)
self.assertFalse("zzzzzz" in sstr)
self.assertFalse("" in sstr)
self.__test_equal(sstr.contains("ll"), ["ll" in i for i in self.string_data], int)
self.__test_equal(sstr.contains("a"), ["a" in i for i in self.string_data], int)
svec = SArray([[1.0,2.0],[2.0,3.0],[3.0,4.0],[4.0,5.0]], array.array)
self.__test_equal(svec.contains(1.0), [1,0,0,0], int)
self.__test_equal(svec.contains(0.0), [0,0,0,0], int)
self.__test_equal(svec.contains(2), [1,1,0,0], int)
slist = SArray([[1,"22"],[2,"33"],[3,"44"],[4,None]], list)
self.__test_equal(slist.contains(1.0), [1,0,0,0], int)
self.__test_equal(slist.contains(3), [0,0,1,0], int)
self.__test_equal(slist.contains("33"), [0,1,0,0], int)
self.__test_equal(slist.contains("3"), [0,0,0,0], int)
self.__test_equal(slist.contains(None), [0,0,0,1], int)
sdict = SArray([{1:"2"},{2:"3"},{3:"4"},{"4":"5"}], dict)
self.__test_equal(sdict.contains(1.0), [1,0,0,0], int)
self.__test_equal(sdict.contains(3), [0,0,1,0], int)
self.__test_equal(sdict.contains("4"), [0,0,0,1], int)
self.__test_equal(sdict.contains("3"), [0,0,0,0], int)
self.__test_equal(SArray(['ab','bc','cd']).is_in('abc'), [1,1,0], int)
self.__test_equal(SArray(['a','b','c']).is_in(['a','b']), [1,1,0], int)
self.__test_equal(SArray([1,2,3]).is_in(array.array('d',[1.0,2.0])), [1,1,0], int)
self.__test_equal(SArray([1,2,None]).is_in([1, None]), [1,0,1], int)
self.__test_equal(SArray([1,2,None]).is_in([1]), [1,0,0], int)
def test_save_load(self):
# Make sure these files don't exist before testing
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
sint = SArray(self.int_data, int)
sflt = SArray([float(x) for x in self.int_data], float)
sstr = SArray([str(x) for x in self.int_data], str)
svec = SArray(self.vec_data, array.array)
slist = SArray(self.list_data, list)
sdict = SArray(self.dict_data, dict)
sint.save('intarr.sidx')
sflt.save('fltarr.sidx')
sstr.save('strarr.sidx')
svec.save('vecarr.sidx')
slist.save('listarr.sidx')
sdict.save('dictarr.sidx')
sint2 = SArray('intarr.sidx')
sflt2 = SArray('fltarr.sidx')
sstr2 = SArray('strarr.sidx')
svec2 = SArray('vecarr.sidx')
slist2 = SArray('listarr.sidx')
sdict2 = SArray('dictarr.sidx')
self.assertRaises(IOError, lambda: SArray('__no_such_file__.sidx'))
self.__test_equal(sint2, self.int_data, int)
self.__test_equal(sflt2, [float(x) for x in self.int_data], float)
self.__test_equal(sstr2, [str(x) for x in self.int_data], str)
self.__test_equal(svec2, self.vec_data, array.array)
self.__test_equal(slist2, self.list_data, list)
self.__test_equal(sdict2, self.dict_data, dict)
# Bad permission
# Windows has a way more ridiculous way of setting permissions. I'm
# sure windows will stop us from writing if we don't have
# permission...probably no reason to test
if sys.platform != 'win32':
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sint.save(os.path.join(test_dir, 'bad.sidx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sint3 = SArray(os.path.join(test_dir, 'bad.sidx'))
os.removedirs(test_dir)
#cleanup
del sint2
del sflt2
del sstr2
del svec2
del slist2
del sdict2
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
def test_save_load_text(self):
self._remove_single_file('txt_int_arr.txt')
sint = SArray(self.int_data, int)
sint.save('txt_int_arr.txt')
self.assertTrue(os.path.exists('txt_int_arr.txt'))
f = open('txt_int_arr.txt')
lines = f.readlines()
for i in range(len(sint)):
self.assertEquals(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr.txt')
self._remove_single_file('txt_int_arr')
sint.save('txt_int_arr', format='text')
self.assertTrue(os.path.exists('txt_int_arr'))
f = open('txt_int_arr')
lines = f.readlines()
for i in range(len(sint)):
self.assertEquals(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr')
def _remove_single_file(self, filename):
try:
os.remove(filename)
except:
pass
def _remove_sarray_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
shutil.rmtree(f)
def test_transform(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
        # Test randomness across segments; a randomized sarray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000), int)
vec = list(sa_random.head(sa_random.size()))
self.assertFalse(all([x == vec[0] for x in vec]))
# test transform with missing values
sa = SArray([1,2,3,None,4,5])
sa1 = sa.apply(lambda x : x + 1)
self.__test_equal(sa1, [2,3,4,None,5,6], int)
def test_transform_with_multiple_lambda(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
sa2_int = sa_int.apply(lambda val: val + 1, int)
expected_output = [x for x in range(ord('a') + 1, ord('a') + 26 + 1)]
self.__test_equal(sa2_int, expected_output, int)
def test_transform_with_exception(self):
sa_char = SArray(['a' for i in range(10000)], str)
# # type mismatch exception
self.assertRaises(TypeError, lambda: sa_char.apply(lambda char: char, int).head(1))
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0, float))
def test_transform_with_type_inference(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char))
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
sa_bool = sa_char.apply(lambda char: ord(char) > ord('c'))
expected_output = [int(x > ord('c')) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_bool, expected_output, int)
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0))
        # Test randomness across segments; a randomized sarray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000))
vec = list(sa_random.head(sa_random.size()))
self.assertFalse(all([x == vec[0] for x in vec]))
def test_transform_on_lists(self):
sa_int = SArray(self.int_data, int)
sa_vec2 = sa_int.apply(lambda x: [x, x+1, str(x)])
expected = [[i, i + 1, str(i)] for i in self.int_data]
self.__test_equal(sa_vec2, expected, list)
sa_int_again = sa_vec2.apply(lambda x: int(x[0]))
self.__test_equal(sa_int_again, self.int_data, int)
# transform from vector to vector
sa_vec = SArray(self.vec_data, array.array)
sa_vec2 = sa_vec.apply(lambda x: x)
self.__test_equal(sa_vec2, self.vec_data, array.array)
# transform on list
sa_list = SArray(self.list_data, list)
sa_list2 = sa_list.apply(lambda x: x)
self.__test_equal(sa_list2, self.list_data, list)
# transform dict to list
sa_dict = SArray(self.dict_data, dict)
# Python 3 doesn't return keys in same order from identical dictionaries.
sort_by_type = lambda x : str(type(x))
sa_list = sa_dict.apply(lambda x: sorted(list(x), key = sort_by_type))
self.__test_equal(sa_list, [sorted(list(x), key = sort_by_type) for x in self.dict_data], list)
def test_transform_dict(self):
# lambda accesses dict
sa_dict = SArray([{'a':1}, {1:2}, {'c': 'a'}, None], dict)
sa_bool_r = sa_dict.apply(lambda x: 'a' in x if x != None else None, skip_undefined=False)
expected_output = [1, 0, 0, None]
self.__test_equal(sa_bool_r, expected_output, int)
# lambda returns dict
expected_output = [{'a':1}, {1:2}, None, {'c': 'a'}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.apply(lambda x: x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter_dict(self):
expected_output = [{'a':1}]
sa_dict = SArray(expected_output, dict)
ret = sa_dict.filter(lambda x: 'a' in x)
self.__test_equal(ret, expected_output, dict)
# try second time to make sure the lambda system still works
expected_output = [{1:2}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.filter(lambda x: 1 in x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter(self):
# test empty
s = SArray([], float)
no_change = s.filter(lambda x : x == 0)
self.assertEqual(no_change.size(), 0)
# test normal case
s = SArray(self.int_data, int)
middle_of_array = s.filter(lambda x: x > 3 and x < 8)
self.assertEqual(list(middle_of_array.head(10)), [x for x in range(4,8)])
# test normal string case
s = SArray(self.string_data, str)
exp_val_list = [x for x in self.string_data if x != 'world']
# Remove all words whose second letter is not in the first half of the alphabet
second_letter = s.filter(lambda x: len(x) > 1 and (ord(x[1]) > ord('a')) and (ord(x[1]) < ord('n')))
self.assertEqual(list(second_letter.head(10)), exp_val_list)
# test not-a-lambda
def a_filter_func(x):
return ((x > 4.4) and (x < 6.8))
s = SArray(self.int_data, float)
another = s.filter(a_filter_func)
self.assertEqual(list(another.head(10)), [5.,6.])
sa = SArray(self.float_data)
# filter by self
sa2 = sa[sa]
self.assertEquals(list(sa.head(10)), list(sa2.head(10)))
# filter by zeros
sa_filter = SArray([0,0,0,0,0,0,0,0,0,0])
sa2 = sa[sa_filter]
self.assertEquals(len(sa2), 0)
# filter by wrong size
sa_filter = SArray([0,2,5])
with self.assertRaises(IndexError):
sa2 = sa[sa_filter]
def test_any_all(self):
s = SArray([0,1,2,3,4,5,6,7,8,9], int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
s = SArray([0,0,0,0,0], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray(self.string_data, str)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
s = SArray(self.int_data, int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
# test empty
s = SArray([], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), True)
s = SArray([[], []], array.array)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray([[],[1.0]], array.array)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
def test_astype(self):
# test empty
s = SArray([], int)
as_out = s.astype(float)
self.assertEqual(as_out.dtype(), float)
# test float -> int
s = SArray(list(map(lambda x: x+0.2, self.float_data)), float)
as_out = s.astype(int)
self.assertEqual(list(as_out.head(10)), self.int_data)
# test int->string
s = SArray(self.int_data, int)
as_out = s.astype(str)
self.assertEqual(list(as_out.head(10)), list(map(lambda x: str(x), self.int_data)))
i_out = as_out.astype(int)
self.assertEqual(list(i_out.head(10)), list(s.head(10)))
s = SArray(self.vec_data, array.array)
with self.assertRaises(RuntimeError):
s.astype(int)
with self.assertRaises(RuntimeError):
s.astype(float)
s = SArray(["a","1","2","3"])
with self.assertRaises(RuntimeError):
s.astype(int)
self.assertEqual(list(s.astype(int,True).head(4)), [None,1,2,3])
s = SArray(["[1 2 3]","[4;5]"])
ret = list(s.astype(array.array).head(2))
self.assertEqual(ret, [array.array('d',[1,2,3]),array.array('d',[4,5])])
s = SArray(["[1,\"b\",3]","[4,5]"])
ret = list(s.astype(list).head(2))
self.assertEqual(ret, [[1,"b",3],[4,5]])
s = SArray(["{\"a\":2,\"b\":3}","{}"])
ret = list(s.astype(dict).head(2))
self.assertEqual(ret, [{"a":2,"b":3},{}])
s = SArray(["[1abc]"])
ret = list(s.astype(list).head(1))
self.assertEqual(ret, [["1abc"]])
s = SArray(["{1xyz:1a,2b:2}"])
ret = list(s.astype(dict).head(1))
self.assertEqual(ret, [{"1xyz":"1a","2b":2}])
# astype between list and array
s = SArray([array.array('d',[1.0,2.0]), array.array('d',[2.0,3.0])])
ret = list(s.astype(list))
self.assertEqual(ret, [[1.0, 2.0], [2.0,3.0]])
ret = list(s.astype(list).astype(array.array))
self.assertEqual(list(s), list(ret))
with self.assertRaises(RuntimeError):
ret = list(SArray([["a",1.0],["b",2.0]]).astype(array.array))
badcast = list(SArray([["a",1.0],["b",2.0]]).astype(array.array, undefined_on_failure=True))
self.assertEqual(badcast, [None, None])
def test_clip(self):
# invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.clip(25,26)
with self.assertRaises(RuntimeError):
s.clip_lower(25)
with self.assertRaises(RuntimeError):
s.clip_upper(26)
# int w/ int, test lower and upper functions too
# int w/float, no change
s = SArray(self.int_data, int)
clip_out = s.clip(3,7).head(10)
# test that our list isn't cast to float if nothing happened
clip_out_nc = s.clip(0.2, 10.2).head(10)
lclip_out = s.clip_lower(3).head(10)
rclip_out = s.clip_upper(7).head(10)
self.assertEqual(len(clip_out), len(self.int_data))
self.assertEqual(len(lclip_out), len(self.int_data))
self.assertEqual(len(rclip_out), len(self.int_data))
for i in range(0,len(clip_out)):
if i < 2:
self.assertEqual(clip_out[i], 3)
self.assertEqual(lclip_out[i], 3)
self.assertEqual(rclip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
elif i > 6:
self.assertEqual(clip_out[i], 7)
self.assertEqual(lclip_out[i], self.int_data[i])
self.assertEqual(rclip_out[i], 7)
self.assertEqual(clip_out_nc[i], self.int_data[i])
else:
self.assertEqual(clip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
# int w/float, change
# float w/int
# float w/float
clip_out = s.clip(2.8, 7.2).head(10)
fs = SArray(self.float_data, float)
ficlip_out = fs.clip(3, 7).head(10)
ffclip_out = fs.clip(2.8, 7.2).head(10)
for i in range(0,len(clip_out)):
if i < 2:
self.assertAlmostEqual(clip_out[i], 2.8)
self.assertAlmostEqual(ffclip_out[i], 2.8)
self.assertAlmostEqual(ficlip_out[i], 3.)
elif i > 6:
self.assertAlmostEqual(clip_out[i], 7.2)
self.assertAlmostEqual(ffclip_out[i], 7.2)
self.assertAlmostEqual(ficlip_out[i], 7.)
else:
self.assertAlmostEqual(clip_out[i], self.float_data[i])
self.assertAlmostEqual(ffclip_out[i], self.float_data[i])
self.assertAlmostEqual(ficlip_out[i], self.float_data[i])
vs = SArray(self.vec_data, array.array);
clipvs = vs.clip(3, 7).head(100)
self.assertEqual(len(clipvs), len(self.vec_data));
for i in range(0, len(clipvs)):
a = clipvs[i]
b = self.vec_data[i]
self.assertEqual(len(a), len(b))
for j in range(0, len(b)):
if b[j] < 3:
b[j] = 3
elif b[j] > 7:
b[j] = 7
self.assertEqual(a, b)
def test_missing(self):
s=SArray(self.int_data, int)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.int_data + [None], int)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.float_data, float)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.float_data + [None], float)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.string_data, str)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.string_data + [None], str)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.vec_data, array.array)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.vec_data + [None], array.array)
self.assertEqual(s.num_missing(), 1)
def test_nonzero(self):
# test empty
s = SArray([],int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test all nonzero
s = SArray(self.float_data, float)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.float_data))
# test all zero
s = SArray([0 for x in range(0,10)], int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test strings
str_list = copy.deepcopy(self.string_data)
str_list.append("")
s = SArray(str_list, str)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.string_data))
def test_std_var(self):
# test empty
s = SArray([], int)
self.assertTrue(s.std() is None)
self.assertTrue(s.var() is None)
# increasing ints
s = SArray(self.int_data, int)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# increasing floats
s = SArray(self.float_data, float)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# vary ddof
self.assertAlmostEqual(s.var(ddof=3), 11.7857143)
self.assertAlmostEqual(s.var(ddof=6), 20.625)
self.assertAlmostEqual(s.var(ddof=9), 82.5)
self.assertAlmostEqual(s.std(ddof=3), 3.4330328)
self.assertAlmostEqual(s.std(ddof=6), 4.5414755)
self.assertAlmostEqual(s.std(ddof=9), 9.08295106)
# bad ddof
with self.assertRaises(RuntimeError):
s.var(ddof=11)
with self.assertRaises(RuntimeError):
s.std(ddof=11)
# bad type
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.std()
with self.assertRaises(RuntimeError):
s.var()
# overflow test
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertAlmostEqual(s.var(), 21267647932558653957237540927630737409.0)
self.assertAlmostEqual(s.std(), 4611686018427387900.0)
def test_tail(self):
# test empty
s = SArray([], int)
self.assertEqual(len(s.tail()), 0)
# test standard tail
s = SArray([x for x in range(0,40)], int)
self.assertEqual(list(s.tail()), [x for x in range(30,40)])
# smaller amount
self.assertEqual(list(s.tail(3)), [x for x in range(37,40)])
# larger amount
self.assertEqual(list(s.tail(40)), [x for x in range(0,40)])
# too large
self.assertEqual(list(s.tail(81)), [x for x in range(0,40)])
def test_max_min_sum_mean(self):
# negative and positive
s = SArray([-2,-1,0,1,2], int)
self.assertEqual(s.max(), 2)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), 0)
self.assertAlmostEqual(s.mean(), 0.)
# test valid and invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.max()
with self.assertRaises(RuntimeError):
s.min()
with self.assertRaises(RuntimeError):
s.sum()
with self.assertRaises(RuntimeError):
s.mean()
s = SArray(self.int_data, int)
self.assertEqual(s.max(), 10)
self.assertEqual(s.min(), 1)
self.assertEqual(s.sum(), 55)
self.assertAlmostEqual(s.mean(), 5.5)
s = SArray(self.float_data, float)
self.assertEqual(s.max(), 10.)
self.assertEqual(s.min(), 1.)
self.assertEqual(s.sum(), 55.)
self.assertAlmostEqual(s.mean(), 5.5)
# test all negative
s = SArray(list(map(lambda x: x*-1, self.int_data)), int)
self.assertEqual(s.max(), -1)
self.assertEqual(s.min(), -10)
self.assertEqual(s.sum(), -55)
self.assertAlmostEqual(s.mean(), -5.5)
# test empty
s = SArray([], float)
self.assertTrue(s.max() is None)
self.assertTrue(s.min() is None)
self.assertTrue(s.mean() is None)
# test sum
t = SArray([], float).sum()
self.assertTrue(type(t) == float)
self.assertTrue(t == 0.0)
t = SArray([], int).sum()
self.assertTrue(type(t) == int or type(t) == long)
self.assertTrue(t == 0)
self.assertTrue(SArray([], array.array).sum() == array.array('d',[]))
# test big ints
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertEqual(s.max(), huge_int)
self.assertEqual(s.min(), 1)
# yes, we overflow
self.assertEqual(s.sum(), (huge_int+1)*-1)
# ...but not here
self.assertAlmostEqual(s.mean(), 4611686018427387904.)
a = SArray([[1,2],[1,2],[1,2]], array.array)
self.assertEqual(a.sum(), array.array('d', [3,6]))
self.assertEqual(a.mean(), array.array('d', [1,2]))
with self.assertRaises(RuntimeError):
a.max()
with self.assertRaises(RuntimeError):
a.min()
a = SArray([[1,2],[1,2],[1,2,3]], array.array)
with self.assertRaises(RuntimeError):
a.sum()
with self.assertRaises(RuntimeError):
a.mean()
def test_max_min_sum_mean_missing(self):
# negative and positive
s = SArray([-2,0,None,None,None], int)
self.assertEqual(s.max(), 0)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), -2)
self.assertAlmostEqual(s.mean(), -1)
s = SArray([None,None,None], int)
self.assertEqual(s.max(), None)
self.assertEqual(s.min(), None)
self.assertEqual(s.sum(), 0)
self.assertEqual(s.mean(), None)
def test_python_special_functions(self):
s = SArray([], int)
self.assertEqual(len(s), 0)
self.assertEqual(str(s), '[]')
self.assertRaises(ValueError, lambda: bool(s))
# increasing ints
s = SArray(self.int_data, int)
self.assertEqual(len(s), len(self.int_data))
self.assertEqual(list(s), self.int_data)
self.assertRaises(ValueError, lambda: bool(s))
realsum = sum(self.int_data)
sum1 = sum([x for x in s])
sum2 = s.sum()
sum3 = s.apply(lambda x:x, int).sum()
self.assertEquals(sum1, realsum)
self.assertEquals(sum2, realsum)
self.assertEquals(sum3, realsum)
# abs
s=np.array(range(-10, 10))
t = SArray(s, int)
self.__test_equal(abs(t), list(abs(s)), int)
t = SArray(s, float)
self.__test_equal(abs(t), list(abs(s)), float)
t = SArray([s], array.array)
self.__test_equal(SArray(abs(t)[0]), list(abs(s)), float)
def test_scalar_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
t = SArray(s, int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(t - 1, list(s - 1), int)
# we handle division differently. All divisions cast to float
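        # (illustrative: SArray([1, 2], int) / 2 yields values [0.5, 1.0] with dtype float)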
self.__test_equal(t / 2, list(s / 2.0), float)
self.__test_equal(t * 2, list(s * 2), int)
self.__test_equal(t ** 2, list(s ** 2), float)
self.__test_almost_equal(t ** 0.5, list(s ** 0.5), float)
self.__test_equal(((t ** 2) ** 0.5 + 1e-8).astype(int), list(s), int)
self.__test_equal(t < 5, list(s < 5), int)
self.__test_equal(t > 5, list(s > 5), int)
self.__test_equal(t <= 5, list(s <= 5), int)
self.__test_equal(t >= 5, list(s >= 5), int)
self.__test_equal(t == 5, list(s == 5), int)
self.__test_equal(t != 5, list(s != 5), int)
self.__test_equal(t % 5, list(s % 5), int)
self.__test_equal(t // 5, list(s // 5), int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(+t, list(+s), int)
self.__test_equal(-t, list(-s), int)
self.__test_equal(1.5 - t, list(1.5 - s), float)
self.__test_equal(2.0 / t, list(2.0 / s), float)
self.__test_equal(2 / t, list(2.0 / s), float)
self.__test_equal(2.5 * t, list(2.5 * s), float)
self.__test_equal(2**t, list(2**s), float)
s_neg = np.array([-1,-2,-3,5,6,7,8,9,10]);
t_neg = SArray(s_neg, int)
self.__test_equal(t_neg // 5, list(s_neg // 5), int)
self.__test_equal(t_neg % 5, list(s_neg % 5), int)
s=["a","b","c"]
t = SArray(s, str)
self.__test_equal(t + "x", [i + "x" for i in s], str)
with self.assertRaises(RuntimeError):
t - 'x'
with self.assertRaises(RuntimeError):
t * 'x'
with self.assertRaises(RuntimeError):
t / 'x'
s = SArray(self.vec_data, array.array)
self.__test_equal(s + 1, [array.array('d', [float(j) + 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - 1, [array.array('d', [float(j) - 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * 2, [array.array('d', [float(j) * 2 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / 2, [array.array('d', [float(j) / 2 for j in i]) for i in self.vec_data], array.array)
s = SArray([1,2,3,4,None])
self.__test_equal(s == None, [0,0,0,0,1], int)
self.__test_equal(s != None, [1,1,1,1,0], int)
def test_modulus_operator(self):
l = [-5,-4,-3,-2,-1,0,1,2,3,4,5]
t = SArray(l, int)
self.__test_equal(t % 2, [i % 2 for i in l], int)
self.__test_equal(t % -2, [i % -2 for i in l], int)
def test_vector_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
s2=np.array([5,4,3,2,1,10,9,8,7,6]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t + t2, list(s + s2), int)
self.__test_equal(t - t2, list(s - s2), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / t2, list(s.astype(float) / s2), float)
self.__test_equal(t * t2, list(s * s2), int)
self.__test_equal(t ** t2, list(s ** s2), float)
self.__test_almost_equal(t ** (1.0 / t2), list(s ** (1.0 / s2)), float)
self.__test_equal(t > t2, list(s > s2), int)
self.__test_equal(t <= t2, list(s <= s2), int)
self.__test_equal(t >= t2, list(s >= s2), int)
self.__test_equal(t == t2, list(s == s2), int)
self.__test_equal(t != t2, list(s != s2), int)
s = SArray(self.vec_data, array.array)
self.__test_almost_equal(s + s, [array.array('d', [float(j) + float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s - s, [array.array('d', [float(j) - float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s * s, [array.array('d', [float(j) * float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s / s, [array.array('d', [float(j) / float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s ** s, [array.array('d', [float(j) ** float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s // s, [array.array('d', [float(j) // float(j) for j in i]) for i in self.vec_data], array.array)
t = SArray(self.float_data, float)
self.__test_almost_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s ** t, [array.array('d', [float(j) ** i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s // t, [array.array('d', [float(j) // i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(+s, [array.array('d', [float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(-s, [array.array('d', [-float(j) for j in i]) for i in self.vec_data], array.array)
neg_float_data = [-v for v in self.float_data]
t = SArray(neg_float_data, float)
self.__test_almost_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s ** t, [array.array('d', [float(j) ** i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s // t, [array.array('d', [float(j) // i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(t // s, [array.array('d', [i[1] // float(j) for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
s = SArray([1,2,3,4,None])
self.assertTrue((s==s).all())
s = SArray([1,2,3,4,None])
self.assertFalse((s!=s).any())
def test_div_corner(self):
def try_eq_sa_val(left_val, right_val):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
            v1 = (SArray([left_val], left_type) / right_val)[0]
            if type(right_val) is array.array:
                if type(left_val) is array.array:
                    v2 = array.array('d', [lv / rv for lv, rv in zip(left_val, right_val)])
                else:
                    v2 = array.array('d', [left_val / rv for rv in right_val])
            else:
                if type(left_val) is array.array:
                    v2 = array.array('d', [lv / right_val for lv in left_val])
                else:
                    v2 = left_val / right_val
if type(v1) in six.integer_types:
self.assertTrue(type(v2) in six.integer_types)
else:
self.assertEqual(type(v1), type(v2))
self.assertEqual(v1, v2)
try_eq_sa_val(1, 2)
try_eq_sa_val(1.0, 2)
try_eq_sa_val(1, 2.0)
try_eq_sa_val(1.0, 2.0)
try_eq_sa_val(-1, 2)
try_eq_sa_val(-1.0, 2)
try_eq_sa_val(-1, 2.0)
try_eq_sa_val(-1.0, 2.0)
try_eq_sa_val([1, -1], 2)
try_eq_sa_val([1, -1], 2.0)
try_eq_sa_val(2,[3, -3])
try_eq_sa_val(2.0,[3, -3])
    def test_floordiv_corner(self):
def try_eq_sa_val(left_val, right_val):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(right_val) is array.array:
if type(left_val) is array.array:
v2 = array.array('d', [lv // rv for lv, rv in zip(left_val, right_val)])
else:
v2 = array.array('d', [left_val // rv for rv in right_val])
else:
if type(left_val) is array.array:
v2 = array.array('d', [lv // right_val for lv in left_val])
else:
v2 = left_val // right_val
if type(v1) in six.integer_types:
self.assertTrue(type(v2) in six.integer_types)
else:
self.assertEqual(type(v1), type(v2))
self.assertEqual(v1, v2)
try_eq_sa_val(1, 2)
try_eq_sa_val(1.0, 2)
try_eq_sa_val(1, 2.0)
try_eq_sa_val(1.0, 2.0)
try_eq_sa_val(-1, 2)
try_eq_sa_val(-1.0, 2)
try_eq_sa_val(-1, 2.0)
try_eq_sa_val(-1.0, 2.0)
try_eq_sa_val([1, -1], 2)
try_eq_sa_val([1, -1], 2.0)
try_eq_sa_val(2,[3, -3])
try_eq_sa_val(2.0,[3, -3])
from math import isnan
def try_eq_sa_correct(left_val, right_val, correct):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(correct) is not list:
v1 = [v1]
correct = [correct]
for v, c in zip(v1, correct):
if type(v) is float and isnan(v):
assert isnan(c)
else:
self.assertEqual(type(v), type(c))
self.assertEqual(v, c)
try_eq_sa_correct(1, 0, None)
try_eq_sa_correct(0, 0, None)
try_eq_sa_correct(-1, 0, None)
try_eq_sa_correct(1.0, 0, float('inf'))
try_eq_sa_correct(0.0, 0, float('nan'))
try_eq_sa_correct(-1.0, 0, float('-inf'))
try_eq_sa_correct([1.0,0,-1], 0, [float('inf'), float('nan'), float('-inf')])
try_eq_sa_correct(1, [1.0, 0], [1., float('inf')])
try_eq_sa_correct(-1, [1.0, 0], [-1., float('-inf')])
try_eq_sa_correct(0, [1.0, 0], [0., float('nan')])
def test_logical_ops(self):
s=np.array([0,0,0,0,1,1,1,1]);
s2=np.array([0,1,0,1,0,1,0,1]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t & t2, list(((s & s2) > 0).astype(int)), int)
self.__test_equal(t | t2, list(((s | s2) > 0).astype(int)), int)
def test_string_operators(self):
s=["a","b","c","d","e","f","g","h","i","j"];
s2=["e","d","c","b","a","j","i","h","g","f"];
t = SArray(s, str)
t2 = SArray(s2, str)
self.__test_equal(t + t2, ["".join(x) for x in zip(s,s2)], str)
self.__test_equal(t + "x", [x + "x" for x in s], str)
self.__test_equal(t < t2, [x < y for (x,y) in zip(s,s2)], int)
self.__test_equal(t > t2, [x > y for (x,y) in zip(s,s2)], int)
self.__test_equal(t == t2, [x == y for (x,y) in zip(s,s2)], int)
self.__test_equal(t != t2, [x != y for (x,y) in zip(s,s2)], int)
self.__test_equal(t <= t2, [x <= y for (x,y) in zip(s,s2)], int)
self.__test_equal(t >= t2, [x >= y for (x,y) in zip(s,s2)], int)
def test_vector_operator_missing_propagation(self):
t = SArray([1,2,3,4,None,6,7,8,9,None], float) # missing 4th and 9th
t2 = SArray([None,4,3,2,np.nan,10,9,8,7,6], float) # missing 0th and 4th
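        # missing positions are {4, 9} in t and {0, 4} in t2; their union {0, 4, 9}
        # leaves 7 rows where both operands are present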
self.assertEquals(len((t + t2).dropna()), 7);
self.assertEquals(len((t - t2).dropna()), 7);
self.assertEquals(len((t * t2).dropna()), 7);
def test_dropna(self):
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
self.assertEquals(len(t.dropna()), 6)
self.assertEquals(list(t.dropna()), no_nas)
t2 = SArray([None,np.nan])
self.assertEquals(len(t2.dropna()), 0)
self.assertEquals(list(SArray(self.int_data).dropna()), self.int_data)
self.assertEquals(list(SArray(self.float_data).dropna()), self.float_data)
def test_fillna(self):
# fillna shouldn't fill anything
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
out = t.fillna('hello')
self.assertEquals(list(out), no_nas)
# Normal integer case (float auto casted to int)
t = SArray([53,23,None,np.nan,5])
self.assertEquals(list(t.fillna(-1.0)), [53,23,-1,-1,5])
# dict type
t = SArray(self.dict_data+[None])
self.assertEquals(list(t.fillna({1:'1'})), self.dict_data+[{1:'1'}])
# list type
t = SArray(self.list_data+[None])
self.assertEquals(list(t.fillna([0,0,0])), self.list_data+[[0,0,0]])
# vec type
t = SArray(self.vec_data+[None])
self.assertEquals(list(t.fillna(array.array('f',[0.0,0.0]))), self.vec_data+[array.array('f',[0.0,0.0])])
# empty sarray
t = SArray()
self.assertEquals(len(t.fillna(0)), 0)
def test_sample(self):
sa = SArray(data=self.int_data)
sa_sample = sa.sample(.5, 9)
sa_sample2 = sa.sample(.5, 9)
self.assertEqual(list(sa_sample.head()), list(sa_sample2.head()))
for i in sa_sample:
self.assertTrue(i in self.int_data)
with self.assertRaises(ValueError):
sa.sample(3)
sa_sample = SArray().sample(.5, 9)
self.assertEqual(len(sa_sample), 0)
def test_hash(self):
a = SArray([0,1,0,1,0,1,0,1], int)
b = a.hash()
zero_hash = b[0]
one_hash = b[1]
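        # indexing b by a acts as a logical filter: b[a] keeps the hashes at the
        # positions where a is 1, and b[1-a] keeps those where a is 0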
self.assertTrue((b[a] == one_hash).all())
self.assertTrue((b[1-a] == zero_hash).all())
# I can hash other stuff too
# does not throw
a.astype(str).hash().__materialize__()
a.apply(lambda x: [x], list).hash().__materialize__()
# Nones hash too!
a = SArray([None, None, None], int).hash()
self.assertTrue(a[0] is not None)
self.assertTrue((a == a[0]).all())
# different seeds give different hash values
self.assertTrue((a.hash(seed=0) != a.hash(seed=1)).all())
def test_random_integers(self):
a = SArray.random_integers(0)
self.assertEqual(len(a), 0)
a = SArray.random_integers(1000)
self.assertEqual(len(a), 1000)
def test_vector_slice(self):
d=[[1],[1,2],[1,2,3]]
g=SArray(d, array.array)
self.assertEqual(list(g.vector_slice(0).head()), [1,1,1])
self.assertEqual(list(g.vector_slice(0,2).head()), [None,array.array('d', [1,2]),array.array('d', [1,2])])
self.assertEqual(list(g.vector_slice(0,3).head()), [None,None,array.array('d', [1,2,3])])
g=SArray(self.vec_data, array.array);
self.__test_equal(g.vector_slice(0), self.float_data, float)
self.__test_equal(g.vector_slice(0, 2), self.vec_data, array.array)
def _my_subslice(self, arr, start=None, stop=None, step=1):
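        # reference implementation: apply Python's built-in slice to each element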
return arr.apply(lambda x: x[slice(start, stop, step)], arr.dtype())
def _slice_equality_test(self, arr, start=None, stop=None, step=1):
self.assertEqual(
list(arr.subslice(start, stop, step)),
list(self._my_subslice(arr,start,stop,step)))
def test_subslice(self):
#string slicing
g=SArray(range(1,1000, 10)).astype(str)
self._slice_equality_test(g, 0, 2);
self._slice_equality_test(g, 0, -1, 2);
self._slice_equality_test(g, -1, -3);
self._slice_equality_test(g, -1, -2, -1);
self._slice_equality_test(g, None, None, -1);
self._slice_equality_test(g, -100, -1);
#list slicing
g=SArray(range(1,10)).apply(lambda x: list(range(x)), list)
self._slice_equality_test(g, 0, 2);
self._slice_equality_test(g, 0, -1, 2);
self._slice_equality_test(g, -1, -3);
self._slice_equality_test(g, -1, -2, -1);
self._slice_equality_test(g, None, None, -1);
self._slice_equality_test(g, -100, -1);
#array slicing
import array
g=SArray(range(1,10)).apply(lambda x: array.array('d', range(x)))
self._slice_equality_test(g, 0, 2);
self._slice_equality_test(g, 0, -1, 2);
self._slice_equality_test(g, -1, -3);
self._slice_equality_test(g, -1, -2, -1);
self._slice_equality_test(g, None, None, -1);
self._slice_equality_test(g, -100, -1);
#this should fail
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).subslice(1)
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).astype(float).subslice(1)
def test_lazy_eval(self):
sa = SArray(range(-10, 10))
sa = sa + 1
sa1 = sa >= 0
sa2 = sa <= 0
sa3 = sa[sa1 & sa2]
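        # only the single element equal to 0 satisfies both filters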
item_count = sa3.size()
self.assertEqual(item_count, 1)
def __test_append(self, data1, data2, dtype):
sa1 = SArray(data1, dtype)
sa2 = SArray(data2, dtype)
sa3 = sa1.append(sa2)
self.__test_equal(sa3, data1 + data2, dtype)
sa3 = sa2.append(sa1)
self.__test_equal(sa3, data2 + data1, dtype)
def test_append(self):
n = len(self.int_data)
m = n // 2
self.__test_append(self.int_data[0:m], self.int_data[m:n], int)
self.__test_append(self.bool_data[0:m], self.bool_data[m:n], int)
self.__test_append(self.string_data[0:m], self.string_data[m:n], str)
self.__test_append(self.float_data[0:m], self.float_data[m:n], float)
self.__test_append(self.vec_data[0:m], self.vec_data[m:n], array.array)
self.__test_append(self.dict_data[0:m], self.dict_data[m:n], dict)
def test_append_exception(self):
val1 = [i for i in range(1, 1000)]
val2 = [str(i) for i in range(-10, 1)]
sa1 = SArray(val1, int)
sa2 = SArray(val2, str)
with self.assertRaises(RuntimeError):
sa3 = sa1.append(sa2)
def test_word_count(self):
sa = SArray(["This is someurl http://someurl!!",
"中文 应该也 行",
'Сблъсъкът между'])
expected = [{"this": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
expected2 = [{"This": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
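        # the default tokenization lowercases and splits on whitespace only, so
        # punctuation stays attached to tokens; expected2 covers to_lower=False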
sa1 = sa._count_words()
self.assertEquals(sa1.dtype(), dict)
self.__test_equal(sa1, expected, dict)
sa1 = sa._count_words(to_lower=False)
self.assertEquals(sa1.dtype(), dict)
self.__test_equal(sa1, expected2, dict)
#should fail if the input type is not string
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
sa._count_words()
def test_word_count2(self):
sa = SArray(["This is some url http://www.someurl.com!!", "Should we? Yes, we should."])
#TODO: Get some weird unicode whitespace in the Chinese and Russian tests
expected1 = [{"this": 1, "is": 1, "some": 1, "url": 1, "http://www.someurl.com!!": 1},
{"should": 1, "we?": 1, "we": 1, "yes,": 1, "should.": 1}]
expected2 = [{"this is some url http://www.someurl.com": 1},
{"should we": 1, " yes": 1, " we should.": 1}]
word_counts1 = sa._count_words()
word_counts2 = sa._count_words(delimiters=["?", "!", ","])
self.assertEquals(word_counts1.dtype(), dict)
self.__test_equal(word_counts1, expected1, dict)
self.assertEquals(word_counts2.dtype(), dict)
self.__test_equal(word_counts2, expected2, dict)
def test_ngram_count(self):
sa_word = SArray(["I like big dogs. They are fun. I LIKE BIG DOGS", "I like.", "I like big"])
sa_character = SArray(["Fun. is. fun","Fun is fun.","fu", "fun"])
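        # per the expectations below: word n-grams are lowercased unless
        # to_lower=False, and character n-grams skip spaces unless ignore_space=False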
# Testing word n-gram functionality
result = sa_word._count_ngrams(3)
result2 = sa_word._count_ngrams(2)
result3 = sa_word._count_ngrams(3,"word", to_lower=False)
result4 = sa_word._count_ngrams(2,"word", to_lower=False)
expected = [{'fun i like': 1, 'i like big': 2, 'they are fun': 1, 'big dogs they': 1, 'like big dogs': 2, 'are fun i': 1, 'dogs they are': 1}, {}, {'i like big': 1}]
expected2 = [{'i like': 2, 'dogs they': 1, 'big dogs': 2, 'are fun': 1, 'like big': 2, 'they are': 1, 'fun i': 1}, {'i like': 1}, {'i like': 1, 'like big': 1}]
expected3 = [{'I like big': 1, 'fun I LIKE': 1, 'I LIKE BIG': 1, 'LIKE BIG DOGS': 1, 'They are fun': 1, 'big dogs They': 1, 'like big dogs': 1, 'are fun I': 1, 'dogs They are': 1}, {}, {'I like big': 1}]
expected4 = [{'I like': 1, 'like big': 1, 'I LIKE': 1, 'BIG DOGS': 1, 'are fun': 1, 'LIKE BIG': 1, 'big dogs': 1, 'They are': 1, 'dogs They': 1, 'fun I': 1}, {'I like': 1}, {'I like': 1, 'like big': 1}]
self.assertEquals(result.dtype(), dict)
self.__test_equal(result, expected, dict)
self.assertEquals(result2.dtype(), dict)
self.__test_equal(result2, expected2, dict)
self.assertEquals(result3.dtype(), dict)
self.__test_equal(result3, expected3, dict)
self.assertEquals(result4.dtype(), dict)
self.__test_equal(result4, expected4, dict)
#Testing character n-gram functionality
result5 = sa_character._count_ngrams(3, "character")
result6 = sa_character._count_ngrams(2, "character")
result7 = sa_character._count_ngrams(3, "character", to_lower=False)
result8 = sa_character._count_ngrams(2, "character", to_lower=False)
result9 = sa_character._count_ngrams(3, "character", to_lower=False, ignore_space=False)
result10 = sa_character._count_ngrams(2, "character", to_lower=False, ignore_space=False)
result11 = sa_character._count_ngrams(3, "character", to_lower=True, ignore_space=False)
result12 = sa_character._count_ngrams(2, "character", to_lower=True, ignore_space=False)
expected5 = [{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {}, {'fun': 1}]
expected6 = [{'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected7 = [{'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {}, {'fun': 1}]
expected8 = [{'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected9 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {}, {'fun': 1}]
expected10 = [{' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected11 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {}, {'fun': 1}]
expected12 = [{' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
self.assertEquals(result5.dtype(), dict)
self.__test_equal(result5, expected5, dict)
self.assertEquals(result6.dtype(), dict)
self.__test_equal(result6, expected6, dict)
self.assertEquals(result7.dtype(), dict)
self.__test_equal(result7, expected7, dict)
self.assertEquals(result8.dtype(), dict)
self.__test_equal(result8, expected8, dict)
self.assertEquals(result9.dtype(), dict)
self.__test_equal(result9, expected9, dict)
self.assertEquals(result10.dtype(), dict)
self.__test_equal(result10, expected10, dict)
self.assertEquals(result11.dtype(), dict)
self.__test_equal(result11, expected11, dict)
self.assertEquals(result12.dtype(), dict)
self.__test_equal(result12, expected12, dict)
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
#should fail if the input type is not string
sa._count_ngrams()
with self.assertRaises(TypeError):
#should fail if n is not of type 'int'
sa_word._count_ngrams(1.01)
with self.assertRaises(ValueError):
#should fail with invalid method
sa_word._count_ngrams(3,"bla")
with self.assertRaises(ValueError):
#should fail with n <0
sa_word._count_ngrams(0)
with warnings.catch_warnings(record=True) as context:
sa_word._count_ngrams(10)
assert len(context) == 1
def test_dict_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_keys = sa.dict_keys()
self.assertEquals([set(i) for i in sa_keys], [{str(i), i} for i in self.int_data])
# na value
d = [{'a': 1}, {None: 2}, {"b": None}, None]
sa = SArray(d)
sa_keys = sa.dict_keys()
self.assertEquals(list(sa_keys), [['a'], [None], ['b'], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_keys()
# empty SArray with type
sa = SArray([], dict)
        self.assertEquals(list(sa.dict_keys().head(10)), [])
def test_dict_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_values = sa.dict_values()
self.assertEquals(list(sa_values), [[i, float(i)] for i in self.int_data])
# na value
d = [{'a': 1}, {None: 'str'}, {"b": None}, None]
sa = SArray(d)
sa_values = sa.dict_values()
self.assertEquals(list(sa_values), [[1], ['str'], [None], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_values()
# empty SArray with type
sa = SArray([], dict)
        self.assertEquals(list(sa.dict_values().head(10)), [])
def test_dict_trim_by_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': [1,2]}, {None: 'str'}, {"b": None, "c": 1}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_keys(['a', 'b'])
self.assertEquals(list(sa_values), [{}, {None: 'str'}, {"c": 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_keys([])
sa = SArray([], dict)
        self.assertEquals(list(sa.dict_trim_by_keys([]).head(10)), [])
def test_dict_trim_by_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None]
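        # per the expectations below, values outside [lower, upper] are dropped
        # while None values are always kept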
sa = SArray(d)
sa_values = sa.dict_trim_by_values(5,10)
self.assertEquals(list(sa_values), [{'c':None}, {None:5}, None])
# no upper key
sa_values = sa.dict_trim_by_values(2)
self.assertEquals(list(sa_values), [{'b': 20, 'c':None}, {"b": 4, None:5}, None])
# no param
sa_values = sa.dict_trim_by_values()
self.assertEquals(list(sa_values), [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None])
# no lower key
sa_values = sa.dict_trim_by_values(upper=7)
self.assertEquals(list(sa_values), [{'a':1, 'c':None}, {"b": 4, None: 5}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_values()
sa = SArray([], dict)
        self.assertEquals(list(sa.dict_trim_by_values().head(10)), [])
def test_dict_has_any_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_any_keys([])
self.assertEquals(list(sa_values), [0,0,None,0])
sa_values = sa.dict_has_any_keys(['a'])
self.assertEquals(list(sa_values), [1,0,None,1])
        # a single value is automatically converted to a list
sa_values = sa.dict_has_any_keys("a")
self.assertEquals(list(sa_values), [1,0,None,1])
sa_values = sa.dict_has_any_keys(['a', 'b'])
self.assertEquals(list(sa_values), [1,1,None,1])
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
sa = SArray([], dict)
        self.assertEquals(list(sa.dict_has_any_keys([]).head(10)), [])
def test_dict_has_all_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_all_keys([])
self.assertEquals(list(sa_values), [1,1,None,1])
sa_values = sa.dict_has_all_keys(['a'])
self.assertEquals(list(sa_values), [1,0,None,1])
        # a single value is automatically converted to a list
sa_values = sa.dict_has_all_keys("a")
self.assertEquals(list(sa_values), [1,0,None,1])
sa_values = sa.dict_has_all_keys(['a', 'b'])
self.assertEquals(list(sa_values), [1,0,None,0])
sa_values = sa.dict_has_all_keys([None, "b"])
self.assertEquals(list(sa_values), [0,1,None,0])
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
sa = SArray([], dict)
        self.assertEquals(list(sa.dict_has_all_keys([]).head(10)), [])
def test_save_load_cleanup_file(self):
        # similarly for SArray
with util.TempDirectory() as f:
sa = SArray(range(1,1000000))
sa.save(f)
# 17 for each sarray, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
            # sa1 now references the on-disk file
sa1 = SArray(f);
            # create another SArray and save to the same location
sa2 = SArray([str(i) for i in range(1,100000)])
sa2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
            # now sa1 should still be accessible
self.__test_equal(sa1, list(sa), int)
            # and sa2 is correct too
sa3 = SArray(f)
self.__test_equal(sa3, list(sa2), str)
            # when sa1 goes out of scope, the tmp files should be gone
sa1 = 1
            time.sleep(1)  # give time for the files to be deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# list_to_compare must have all unique values for this to work
def __generic_unique_test(self, list_to_compare):
test = SArray(list_to_compare + list_to_compare)
self.assertEquals(sorted(list(test.unique())), sorted(list_to_compare))
def test_unique(self):
# Test empty SArray
test = SArray([])
self.assertEquals(list(test.unique()), [])
# Test one value
test = SArray([1])
self.assertEquals(list(test.unique()), [1])
# Test many of one value
test = SArray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEquals(list(test.unique()), [1])
# Test all unique values
test = SArray(self.int_data)
self.assertEquals(sorted(list(test.unique())), self.int_data)
# Test an interesting sequence
interesting_ints = [4654,4352436,5453,7556,45435,4654,5453,4654,5453,1,1,1,5,5,5,8,66,7,7,77,90,-34]
test = SArray(interesting_ints)
u = test.unique()
self.assertEquals(len(u), 13)
# We do not preserve order
self.assertEquals(sorted(list(u)), sorted(np.unique(interesting_ints)))
# Test other types
self.__generic_unique_test(self.string_data[0:6])
        # only works reliably because these float values can be compared for
        # exact equality
self.__generic_unique_test(self.float_data)
self.__generic_unique_test(self.list_data)
self.__generic_unique_test(self.vec_data)
with self.assertRaises(TypeError):
SArray(self.dict_data).unique()
def test_item_len(self):
# empty SArray
test = SArray([])
with self.assertRaises(TypeError):
            test.item_length()
# wrong type
test = SArray([1,2,3])
with self.assertRaises(TypeError):
            test.item_length()
test = SArray(['1','2','3'])
with self.assertRaises(TypeError):
            test.item_length()
# vector type
test = SArray([[], [1], [1,2], [1,2,3], None])
item_length = test.item_length();
self.assertEquals(list(item_length), list([0, 1,2,3,None]))
# dict type
test = SArray([{}, {'key1': 1}, {'key2':1, 'key1':2}, None])
self.assertEquals(list(test.item_length()), list([0, 1,2,None]))
# list type
test = SArray([[], [1,2], ['str', 'str2'], None])
self.assertEquals(list(test.item_length()), list([0, 2,2,None]))
def test_random_access(self):
t = list(range(0,100000))
s = SArray(t)
# simple slices
self.__test_equal(s[1:10000], t[1:10000], int)
self.__test_equal(s[0:10000:3], t[0:10000:3], int)
self.__test_equal(s[1:10000:3], t[1:10000:3], int)
self.__test_equal(s[2:10000:3], t[2:10000:3], int)
self.__test_equal(s[3:10000:101], t[3:10000:101], int)
# negative slices
self.__test_equal(s[-5:], t[-5:], int)
self.__test_equal(s[-1:], t[-1:], int)
self.__test_equal(s[-100:-10], t[-100:-10], int)
self.__test_equal(s[-100:-10:2], t[-100:-10:2], int)
# single element reads
self.assertEquals(s[511], t[511])
self.assertEquals(s[1912], t[1912])
self.assertEquals(s[-1], t[-1])
self.assertEquals(s[-10], t[-10])
# A cache boundary
self.assertEquals(s[32*1024-1], t[32*1024-1])
self.assertEquals(s[32*1024], t[32*1024])
# totally different
self.assertEquals(s[19312], t[19312])
        # edge case oddities
self.__test_equal(s[10:100:100], t[10:100:100], int)
self.__test_equal(s[-100:len(s):10], t[-100:len(t):10], int)
self.__test_equal(s[-1:-2], t[-1:-2], int)
self.__test_equal(s[-1:-1000:2], t[-1:-1000:2], int)
with self.assertRaises(IndexError):
s[len(s)]
# with caching abilities; these should be fast, as 32K
# elements are cached.
for i in range(0, 100000, 100):
self.assertEquals(s[i], t[i])
for i in range(0, 100000, 100):
self.assertEquals(s[-i], t[-i])
def test_sort(self):
test = SArray([1,2,3,5,1,4])
ascending = SArray([1,1,2,3,4,5])
descending = SArray([5,4,3,2,1,1])
result = test.sort()
self.assertEqual(list(result), list(ascending))
result = test.sort(ascending = False)
self.assertEqual(list(result), list(descending))
with self.assertRaises(TypeError):
SArray([[1,2], [2,3]]).sort()
def test_unicode_encode_should_not_fail(self):
g=SArray([{'a':u'\u2019'}])
g=SArray([u'123',u'\u2019'])
g=SArray(['123',u'\u2019'])
def test_read_from_avro(self):
encoded_data = 'T2JqAQQWYXZyby5zY2hlbWHsBXsiZmllbGRzIjogW3sidHlwZSI6ICJzdHJpbmciLCAibmFtZSI6ICJidXNpbmVzc19pZCJ9LCB7InR5cGUiOiAic3RyaW5nIiwgIm5hbWUiOiAiZGF0ZSJ9LCB7InR5cGUiOiAic3RyaW5nIiwgIm5hbWUiOiAicmV2aWV3X2lkIn0sIHsidHlwZSI6ICJpbnQiLCAibmFtZSI6ICJzdGFycyJ9LCB7InR5cGUiOiAic3RyaW5nIiwgIm5hbWUiOiAidGV4dCJ9LCB7InR5cGUiOiAic3RyaW5nIiwgIm5hbWUiOiAidHlwZSJ9LCB7InR5cGUiOiAic3RyaW5nIiwgIm5hbWUiOiAidXNlcl9pZCJ9LCB7InR5cGUiOiB7InR5cGUiOiAibWFwIiwgInZhbHVlcyI6ICJpbnQifSwgIm5hbWUiOiAidm90ZXMifV0sICJ0eXBlIjogInJlY29yZCIsICJuYW1lIjogInJldmlldyJ9FGF2cm8uY29kZWMIbnVsbAAON5ELIy6PokglPEeciZP7BOggLHNnQmwzVURFY05ZS3d1VWI5MkNZZEEUMjAwOS0wMS0yNSxaai1SMFpacUlLRng1NkxZMnN1MWlRCIAZVGhlIG93bmVyIG9mIENoaW5hIEtpbmcgaGFkIG5ldmVyIGhlYXJkIG9mIFllbHAuLi51bnRpbCBKaW0gVyByb2xsZWQgdXAgb24gQ2hpbmEgS2luZyEKClRoZSBvd25lciBvZiBDaGluYSBLaW5nLCBNaWNoYWVsLCBpcyB2ZXJ5IGZyaWVuZGx5IGFuZCBjaGF0dHkuICBCZSBQcmVwYXJlZCB0byBjaGF0IGZvciBhIGZldyBtaW51dGVzIGlmIHlvdSBzdHJpa2UgdXAgYSBjb252ZXJzYXRpb24uCgpUaGUgc2VydmljZSBoZXJlIHdhcyB0ZXJyaWZpYy4gIFdlIGhhZCBzZXZlcmFsIHBlb3BsZSBmdXNzaW5nIG92ZXIgdXMgYnV0IHRoZSBwcmltYXJ5IHNlcnZlciwgTWFnZ2llIHdhcyBhIGdlbS4gIAoKTXkgd2lmZSBhbmQgdGhlIGtpZHMgb3B0ZWQgZm9yIHRoZSBBbWVyaWNhbml6ZWQgbWVudSBhbmQgd2VudCB3aXRoIHNwZWNpYWxzIGxpa2Ugc3dlZXQgYW5kIHNvdXIgY2hpY2tlbiwgc2hyaW1wIGluIHdoaXRlIHNhdWNlIGFuZCBnYXJsaWMgYmVlZi4gIEVhY2ggY2FtZSBjYW1lIHdpdGggc291cCwgZWdnIHJvbGwgYW5kIHJpY2UuICBJIHNhbXBsZWQgdGhlIGdhcmxpYyBiZWVmIHdoaWNoIHRoZXkgcHJlcGFyZWQgd2l0aCBhIGt1bmcgcGFvIGJyb3duIHNhdWNlIChhIGRlY2lzaW9uIE1hZ2dpZSBhbmQgbXkgd2lmZSBhcnJpdmVkIGF0IGFmdGVyIHNldmVyYWwgbWludXRlcyBvZiBkaXNjdXNzaW9uKSBpdCBoYWQgYSBuaWNlIHJvYnVzdCBmbGF2b3IgYW5kIHRoZSB2ZWdnaWVzIHdlcmUgZnJlc2ggYW5kIGZsYXZvcmZ1bC4gIEkgIGFsc28gc2FtcGxlZCB0aGUgc2hyaW1wIHdoaWNoIHdlcmUgc3VjY3VsZW50IGFuZCB0aGUgd2hpdGUgc2F1Y2UgaGFkIGEgbGl0dGxlIG1vcmUgZGlzdGluY3RpdmVuZXNzIHRvIGl0IHRoYW4gdGhlIHNhbWUgc2F1Y2UgYXQgbWFueSBDaGluZXNlIHJlc3RhdXJhbnRzLgoKSSBvcmRlcmVkIGZyb20gdGhlIHRyYWRpdGlvbmFsIG1lbnUgYnV0IHdlbnQgbm90IHRvbyBhZHZlbnR1cm91cyB3aXRoIHNpenpsaW5nIHBsYXRlIHdpdGggc2NhbGxvcHMgYW5kIHNocmltcCBpbiBibGFjayBwZXBwZXIgc2F1Y2UuICBWZXJ5IGVuam95YWJsZS4gIEFnYWluLCBzdWNjdWxlbnQgc2hyaW1wLiAgVGhlIHNjYWxsb3BzIHdlcmUgdGFzdHkgYXMgd2VsbC4gIFJlYWxpemluZyB0aGF0IEkgbW92ZWQgaGVyZSBmcm9tIEJvc3RvbiBhbmQgSSBnbyBpbnRvIGFueSBzZWFmb29kIGV4cGVyaWVuY2Ugd2l0aCBkaW1pbmlzaGVkIGV4cGVjdGF0aW9ucyBub3cgdGhhdCBJIGxpdmUgaW4gdGhlIHdlc3QsIEkgaGF2ZSB0byBzYXkgdGhlIHNjYWxsb3BzIGFyZSBhbW9uZyB0aGUgZnJlc2hlciBhbmQganVkaWNpb3VzbHkgcHJlcGFyZWQgdGhhdCBJIGhhdmUgaGFkIGluIFBob2VuaXguCgpPdmVyYWxsIENoaW5hIEtpbmcgZGVsaXZlcmVkIGEgdmVyeSB0YXN0eSBhbmQgdmVyeSBmcmVzaCBtZWFsLiAgVGhleSBoYXZlIGEgZmFpcmx5IGV4dGVuc2l2ZSB0cmFkaXRpb25hbCBtZW51IHdoaWNoIEkgbG9vayBmb3J3YXJkIHRvIGV4cGxvcmluZyBmdXJ0aGVyLgoKVGhhbmtzIHRvIENocmlzdGluZSBPIGZvciBoZXIgcmV2aWV3Li4uYWZ0ZXIgcmVhZGluZyB0aGF0IEkga25ldyBDaGluYSBLaW5nIHdhcyBBLU9LLgxyZXZpZXcsUDJrVms0Y0lXeUs0ZTRoMTRSaEstUQYKZnVubnkIDHVzZWZ1bBIIY29vbA4ALGFyS2NrTWY3bEdOWWpYaktvNkRYY0EUMjAxMi0wNS0wNSxFeVZmaFJEbHlpcDJFcktNT0hFQS1BCKQEV2UndmUgYmVlbiBoZXJlIGEgZmV3IHRpbWVzIGFuZCB3ZSBsb3ZlIGFsbCB0aGUgZnJlc2ggaW5ncmVkaWVudHMuIFRoZSBwaXp6YSBpcyBnb29kIHdoZW4geW91IGVhdCBpdCBmcmVzaCBidXQgaWYgeW91IGxpa2UgdG8gZWF0IHlvdXIgcGl6emEgY29sZCB0aGVuIHlvdSdsbCBiZSBiaXRpbmcgaW50byBoYXJkIGRvdWdoLiBUaGVpciBOdXRlbGxhIHBpenphIGlzIGdvb2QuIFRha2UgYSBtZW51IGFuZCBjaGVjayBvdXQgdGhlaXIgbWVudSBhbmQgaG91cnMgZm9yIHNwZWNpYWxzLgxyZXZpZXcseDFZbDFkcE5jV0NDRWRwTUU5ZGcwZwYKZnVubnkCDHVzZWZ1bAIIY29vbAAADjeRCyMuj6JIJTxHnImT+w=='
test_avro_file = open("test.avro", "wb")
test_avro_file.write(binascii.a2b_base64(encoded_data))
test_avro_file.close()
sa = SArray.from_avro("test.avro")
self.assertEqual(sa.dtype(), dict)
self.assertEqual(len(sa), 2)
def test_from_const(self):
g = SArray.from_const('a', 100)
self.assertEqual(len(g), 100)
self.assertEqual(list(g), ['a']*100)
g = SArray.from_const(dt.datetime(2013, 5, 7, 10, 4, 10),10)
self.assertEqual(len(g), 10)
self.assertEqual(list(g), [dt.datetime(2013, 5, 7, 10, 4, 10)]*10)
g = SArray.from_const(0, 0)
self.assertEqual(len(g), 0)
g = SArray.from_const(None, 100)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), float)
g = SArray.from_const(None, 100, str)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), str)
g = SArray.from_const(0, 100, float)
self.assertEquals(list(g), [0.0] * 100)
self.assertEqual(g.dtype(), float)
g = SArray.from_const(0.0, 100, int)
self.assertEquals(list(g), [0] * 100)
self.assertEqual(g.dtype(), int)
g = SArray.from_const(None, 100, float)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), float)
g = SArray.from_const(None, 100, int)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), int)
g = SArray.from_const(None, 100, list)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), list)
g = SArray.from_const([1], 100, list)
self.assertEquals(list(g), [[1]] * 100)
self.assertEqual(g.dtype(), list)
def test_from_sequence(self):
with self.assertRaises(TypeError):
g = SArray.from_sequence()
g = SArray.from_sequence(100)
self.assertEqual(list(g), list(range(100)))
g = SArray.from_sequence(10, 100)
self.assertEqual(list(g), list(range(10, 100)))
g = SArray.from_sequence(100, 10)
self.assertEqual(list(g), list(range(100, 10)))
def test_datetime(self):
sa = SArray(self.datetime_data)
self.__test_equal(sa ,self.datetime_data,dt.datetime)
sa = SArray(self.datetime_data2)
self.__test_equal(sa ,self.datetime_data2,dt.datetime)
ret = sa.split_datetime(limit=['year','month','day','hour','minute',
'second','us','weekday', 'isoweekday','tmweekday'])
self.assertEqual(ret.num_cols(), 10)
self.__test_equal(ret['X.year'] , [2013, 1902, None], int)
self.__test_equal(ret['X.month'] , [5, 10, None], int)
self.__test_equal(ret['X.day'] , [7, 21, None], int)
self.__test_equal(ret['X.hour'] , [10, 10, None], int)
self.__test_equal(ret['X.minute'] , [4, 34, None], int)
self.__test_equal(ret['X.second'] , [10, 10, None], int)
self.__test_equal(ret['X.us'] , [109321, 991111, None], int)
self.__test_equal(ret['X.weekday'] , [1, 1, None], int)
self.__test_equal(ret['X.isoweekday'] , [2, 2, None], int)
self.__test_equal(ret['X.tmweekday'] , [2, 2, None], int)
def test_datetime_difference(self):
sa = SArray(self.datetime_data)
sa2 = SArray(self.datetime_data2)
res = sa2 - sa
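        # the fixtures are assumed to differ only in their microsecond component,
        # so the expected difference in seconds is just microseconds / 1e6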
expected = [float(x.microsecond) / 1000000.0 if x is not None else x for x in self.datetime_data2]
self.assertEqual(len(res), len(expected))
for i in range(len(res)):
            if res[i] is None:
self.assertEqual(res[i], expected[i])
else:
self.assertAlmostEqual(res[i], expected[i], places=6)
def test_datetime_lambda(self):
data = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111,
tzinfo=GMT(1))]
g=SArray(data)
gstr=g.apply(lambda x:str(x))
self.__test_equal(gstr, [str(x) for x in g], str)
gident=g.apply(lambda x:x)
self.__test_equal(gident, list(g), dt.datetime)
def test_datetime_to_str(self):
sa = SArray(self.datetime_data)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None],str)
sa = SArray([None,None,None],dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[None,None,None],str)
sa = SArray(dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[],str)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.datetime_to_str)
sa = SArray()
self.assertRaises(TypeError,sa.datetime_to_str)
def test_str_to_datetime(self):
sa_string = SArray(['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None])
sa_datetime_back = sa_string.str_to_datetime()
expected = self.datetime_data
self.__test_equal(sa_datetime_back,expected,dt.datetime)
sa_string = SArray([None,None,None],str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[None,None,None],dt.datetime)
sa_string = SArray(dtype=str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[],dt.datetime)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.str_to_datetime)
sa = SArray()
self.assertRaises(TypeError,sa.str_to_datetime)
# hour without leading zero
sa = SArray(['10/30/2014 9:01'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M')
expected = [dt.datetime(2014, 10, 30, 9, 1)]
self.__test_equal(sa,expected,dt.datetime)
# without delimiters
sa = SArray(['10302014 0901', '10302014 2001'])
sa = sa.str_to_datetime('%m%d%Y %H%M')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 20, 1)]
self.__test_equal(sa,expected,dt.datetime)
        # another test without delimiters
sa = SArray(['20110623T191001'])
sa = sa.str_to_datetime("%Y%m%dT%H%M%S%F%q")
expected = [dt.datetime(2011, 6, 23, 19, 10, 1)]
self.__test_equal(sa,expected,dt.datetime)
# am pm
sa = SArray(['10/30/2014 9:01am', '10/30/2014 9:01pm'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%p')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
sa = SArray(['10/30/2014 9:01AM', '10/30/2014 9:01PM'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%P')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
# failure 13pm
sa = SArray(['10/30/2014 13:01pm'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %H:%M%p')
# failure hour 13 when %l should only have up to hour 12
sa = SArray(['10/30/2014 13:01'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %l:%M')
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %L:%M')
sa = SArray(['2013-05-07T10:04:10',
'1902-10-21T10:34:10UTC+05:45'])
expected = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(5.75))]
self.__test_equal(sa.str_to_datetime() ,expected,dt.datetime)
def test_apply_with_partial(self):
sa = SArray([1, 2, 3, 4, 5])
def concat_fn(character, number):
return '%s%d' % (character, number)
my_partial_fn = functools.partial(concat_fn, 'x')
sa_transformed = sa.apply(my_partial_fn)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sa = SArray([1, 2, 3, 4, 5])
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, number):
return '%s%d' % (self.character, number)
concatenator = Concatenator('x')
sa_transformed = sa.apply(concatenator)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_argmax_argmin(self):
sa = SArray([1,4,-1,10,3,5,8])
index = [sa.argmax(),sa.argmin()]
expected = [3,2]
self.assertEqual(index,expected)
sa = SArray([1,4.3,-1.4,0,3,5.6,8.9])
index = [sa.argmax(),sa.argmin()]
expected = [6,2]
self.assertEqual(index,expected)
#empty case
sa = SArray([])
index = [sa.argmax(),sa.argmin()]
expected = [None,None]
self.assertEqual(index,expected)
# non-numeric type
sa = SArray(["434","43"])
with self.assertRaises(TypeError):
sa.argmax()
with self.assertRaises(TypeError):
sa.argmin()
def test_apply_with_recursion(self):
sa = SArray(range(1000))
sastr = sa.astype(str)
rets = sa.apply(lambda x:sastr[x])
self.assertEqual(list(rets), list(sastr))
def test_save_sarray(self):
        '''Saving a lazily evaluated SArray should not materialize it to the target folder
        '''
data = SArray(range(1000))
data = data[data > 50]
#lazy and good
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
print(data)
def test_to_numpy(self):
X = SArray(range(100))
import numpy as np
import numpy.testing as nptest
Y = np.array(range(100))
nptest.assert_array_equal(X.to_numpy(), Y)
X = X.astype(str)
Y = np.array([str(i) for i in range(100)])
nptest.assert_array_equal(X.to_numpy(), Y)
def test_rolling_mean(self):
data = SArray(range(1000))
neg_data = SArray(range(-100,100,2))
### Small backward window including current
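        # the mean of four consecutive integers n..n+3 is n + 1.5, hence the
        # half-integer expectations below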
res = data.rolling_mean(-3,0)
expected = [None for i in range(3)] + [i + .5 for i in range(1,998)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_mean(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_mean(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=3)
expected[2] = 1.0
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=2)
expected[1] = 0.5
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=1)
expected[0] = 0.0
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_mean(-3,0,min_observations=-1)
res = neg_data.rolling_mean(-3,0)
expected = [None for i in range(3)] + [float(i) for i in range(-97,96,2)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = neg_data.astype(float).rolling_mean(-3,0)
self.__test_equal(res,expected,float)
# Test vector input
res = SArray(self.vec_data).rolling_mean(-3,0)
expected = [None for i in range(3)] + [array.array('d',[i+.5, i+1.5]) for i in range(2,9)]
self.__test_equal(res,expected,array.array)
### Small forward window including current
res = data.rolling_mean(0,4)
expected = [float(i) for i in range(2,998)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(0,4)
expected = [float(i) for i in range(-96,95,2)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### Small backward window not including current
res = data.rolling_mean(-5,-1)
expected = [None for i in range(5)] + [float(i) for i in range(2,997)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-5,-1)
expected = [None for i in range(5)] + [float(i) for i in range(-96,94,2)]
self.__test_equal(res,expected,float)
### Small forward window not including current
res = data.rolling_mean(1,5)
expected = [float(i) for i in range(3,998)] + [None for i in range(5)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(1,5)
expected = [float(i) for i in range(-94,96,2)] + [None for i in range(5)]
self.__test_equal(res,expected,float)
### "Centered" rolling aggregate
res = data.rolling_mean(-2,2)
expected = [None for i in range(2)] + [float(i) for i in range(2,998)] + [None for i in range(2)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-2,2)
expected = [None for i in range(2)] + [float(i) for i in range(-96,96,2)] + [None for i in range(2)]
self.__test_equal(res,expected,float)
### Lopsided rolling aggregate
res = data.rolling_mean(-2,1)
expected = [None for i in range(2)] + [i + .5 for i in range(1,998)] + [None for i in range(1)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-2,1)
expected = [None for i in range(2)] + [float(i) for i in range(-97,97,2)] + [None for i in range(1)]
self.__test_equal(res,expected,float)
### A very forward window
res = data.rolling_mean(500,502)
expected = [float(i) for i in range(501,999)] + [None for i in range(502)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(50,52)
expected = [float(i) for i in range(2,98,2)] + [None for i in range(52)]
self.__test_equal(res,expected,float)
### A very backward window
res = data.rolling_mean(-502,-500)
expected = [None for i in range(502)] + [float(i) for i in range(1,499)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-52,-50)
expected = [None for i in range(52)] + [float(i) for i in range(-98,-2,2)]
self.__test_equal(res,expected,float)
### A window size much larger than anticipated segment size
res = data.rolling_mean(0,749)
expected = [i + .5 for i in range(374,625)] + [None for i in range(749)]
self.__test_equal(res,expected,float)
### A window size larger than the array
res = data.rolling_mean(0,1000)
expected = [None for i in range(1000)]
self.__test_equal(res,expected,type(None))
### A window size of 1
res = data.rolling_mean(0,0)
self.__test_equal(res, list(data), float)
res = data.rolling_mean(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, float)
res = data.rolling_mean(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_mean(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_mean(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_mean(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_mean(0,1)
self.__test_equal(res, [1.5,2.5,None], float)
def test_rolling_sum(self):
data = SArray(range(1000))
neg_data = SArray(range(-100,100,2))
### Small backward window including current
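        # the sum of four consecutive integers n..n+3 is 4n + 6, so successive
        # windows increase by 4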
res = data.rolling_sum(-3,0)
expected = [None for i in range(3)] + [i for i in range(6,3994,4)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_sum(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_sum(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=3)
expected[2] = 3
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=2)
expected[1] = 1
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=1)
expected[0] = 0
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=0)
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_sum(-3,0,min_observations=-1)
res = neg_data.rolling_sum(-3,0)
expected = [None for i in range(3)] + [i for i in range(-388,388,8)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = neg_data.astype(float).rolling_sum(-3,0)
self.__test_equal(res,expected,float)
# Test vector input
res = SArray(self.vec_data).rolling_sum(-3,0)
expected = [None for i in range(3)] + [array.array('d',[i, i+4]) for i in range(10,38,4)]
self.__test_equal(res,expected,array.array)
### Small forward window including current
res = data.rolling_sum(0,4)
expected = [i for i in range(10,4990,5)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(0,4)
expected = [i for i in range(-480,480,10)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### Small backward window not including current
res = data.rolling_sum(-5,-1)
expected = [None for i in range(5)] + [i for i in range(10,4985,5)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-5,-1)
expected = [None for i in range(5)] + [i for i in range(-480,470,10)]
self.__test_equal(res,expected,int)
### Small forward window not including current
res = data.rolling_sum(1,5)
expected = [i for i in range(15,4990,5)] + [None for i in range(5)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(1,5)
expected = [i for i in range(-470,480,10)] + [None for i in range(5)]
self.__test_equal(res,expected,int)
### "Centered" rolling aggregate
res = data.rolling_sum(-2,2)
expected = [None for i in range(2)] + [i for i in range(10,4990,5)] + [None for i in range(2)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-2,2)
expected = [None for i in range(2)] + [i for i in range(-480,480,10)] + [None for i in range(2)]
self.__test_equal(res,expected,int)
### Lopsided rolling aggregate
res = data.rolling_sum(-2,1)
expected = [None for i in range(2)] + [i for i in range(6,3994,4)] + [None for i in range(1)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-2,1)
expected = [None for i in range(2)] + [i for i in range(-388,388,8)] + [None for i in range(1)]
self.__test_equal(res,expected,int)
### A very forward window
res = data.rolling_sum(500,502)
expected = [i for i in range(1503,2997,3)] + [None for i in range(502)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(50,52)
expected = [i for i in range(6,294,6)] + [None for i in range(52)]
self.__test_equal(res,expected,int)
### A very backward window
res = data.rolling_sum(-502,-500)
expected = [None for i in range(502)] + [i for i in range(3,1497,3)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-52,-50)
expected = [None for i in range(52)] + [i for i in range(-294,-6,6)]
self.__test_equal(res,expected,int)
### A window size much larger than anticipated segment size
res = data.rolling_sum(0,749)
expected = [i for i in range(280875,469125,750)] + [None for i in range(749)]
self.__test_equal(res,expected,int)
### A window size larger than the array
res = data.rolling_sum(0,1000)
expected = [None for i in range(1000)]
self.__test_equal(res,expected,type(None))
### A window size of 1
res = data.rolling_sum(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_sum(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_sum(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_sum(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_sum(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_sum(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_sum(0,1)
self.__test_equal(res, [3,5,None], int)
def test_rolling_max(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_max(-3,0)
expected = [None for i in range(3)] + [i for i in range(3,1000)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_max(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_max(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_max(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_max(-3, 0, min_observations=3)
expected[2] = 2
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_max(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_max(-3,0)
### Small forward window including current
res = data.rolling_max(0,4)
expected = [float(i) for i in range(4,1000)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_max(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_max(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_max(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_max(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_max(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_max(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_max(0,1)
self.__test_equal(res, [2,3,None], int)
def test_rolling_min(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_min(-3,0)
expected = [None for i in range(3)] + [i for i in range(0,997)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_min(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_min(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_min(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_min(-3, 0, min_observations=3)
expected[2] = 0
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_min(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_min(-3,0)
### Small forward window including current
res = data.rolling_min(0,4)
expected = [float(i) for i in range(0,996)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_min(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_min(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_min(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_min(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_min(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_min(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_min(0,1)
self.__test_equal(res, [1,2,None], int)
def test_rolling_var(self):
data = SArray(range(1000))
### Small backward window including current
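        # the population variance of any four consecutive integers is
        # (2*1.5**2 + 2*0.5**2)/4 = 1.25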
res = data.rolling_var(-3,0)
expected = [None for i in range(3)] + [1.25 for i in range(997)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_var(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_var(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_var(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_var(-3, 0, min_observations=3)
expected[2] = (2.0/3.0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_var(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_var(-3,0)
### Small forward window including current
res = data.rolling_var(0,4)
expected = [2 for i in range(996)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### A window size of 1
res = data.rolling_var(0,0)
self.__test_equal(res, [0 for i in range(1000)], float)
res = data.rolling_var(-2,-2)
self.__test_equal(res, [None,None] + [0 for i in range(998)], float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_var(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_var(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_var(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_var(0,1)
self.__test_equal(res, [.25,.25,None], float)
def test_rolling_stdv(self):
data = SArray(range(1000))
### Small backward window including current
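        # the population standard deviation of any four consecutive integers is
        # sqrt(1.25) = 1.118033988749895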
res = data.rolling_stdv(-3,0)
expected = [None for i in range(3)] + [1.118033988749895 for i in range(997)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_stdv(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_stdv(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_stdv(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_stdv(-3, 0, min_observations=3)
expected[2] = math.sqrt(2.0/3.0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_stdv(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_stdv(-3,0)
### Small forward window including current
res = data.rolling_stdv(0,4)
expected = [math.sqrt(2) for i in range(996)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### A window size of 1
res = data.rolling_stdv(0,0)
self.__test_equal(res, [0 for i in range(1000)], float)
res = data.rolling_stdv(-2,-2)
self.__test_equal(res, [None,None] + [0 for i in range(998)], float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_stdv(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_stdv(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_stdv(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_stdv(0,1)
self.__test_equal(res, [.5,.5,None], float)
def test_rolling_count(self):
data = SArray(range(100))
### Small backward window including current
res = data.rolling_count(-3,0)
expected = [1,2,3] + [4 for i in range(97)]
self.__test_equal(res,expected,int)
# Test float inputs
res = data.astype(float).rolling_count(-3,0)
self.__test_equal(res,expected,int)
# Test vector input
res = SArray(self.vec_data).rolling_count(-3,0)
expected = [1,2,3] + [4 for i in range(7)]
self.__test_equal(res,expected,int)
### Test string input
res = SArray(self.string_data).rolling_count(-3,0)
self.__test_equal(res,expected[0:8],int)
### Small forward window including current
res = data.rolling_count(0,4)
expected = [5 for i in range(0,96)] + [4,3,2,1]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_count(0,0)
self.__test_equal(res, [1 for i in range(100)], int)
res = data.rolling_count(-2,-2)
self.__test_equal(res, [0,0] + [1 for i in range(98)], int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_count(4,2)
### Empty SArray
sa = SArray()
res = sa.rolling_count(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_count(0,1)
self.__test_equal(res, [2,2,1], int)
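        # missing values are not counted, so the trailing None shrinks the counts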
sa = SArray([1,2,None])
res = sa.rolling_count(0,1)
self.__test_equal(res, [2,1,0], int)
@nottest
def cumulative_aggregate_comparison(self, out, ans):
import array
self.assertEqual(out.dtype(), ans.dtype())
self.assertEqual(out.size(), ans.size())
for i in range(len(out)):
if out[i] is None:
self.assertTrue(ans[i] is None)
if ans[i] is None:
self.assertTrue(out[i] is None)
if type(out[i]) != array.array:
self.assertAlmostEqual(out[i], ans[i])
else:
self.assertEqual(len(out[i]), len(ans[i]))
oi = out[i]
ansi = ans[i]
for j in range(len(oi)):
                    self.assertAlmostEqual(oi[j], ansi[j])
def test_cumulative_sum(self):
def single_test(src, ans):
out = src.cumulative_sum();
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_sum()
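        # per the cases below, a missing row does not contribute to the running
        # total and simply repeats it (a leading None before any value stays None)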
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.1, 1.2, 3.3, 6.4, 10.5, 15.6, 21.7, 28.8])
)
single_test(
SArray([[11.0, 2.0], [22.0, 1.0], [3.0, 4.0], [4.0, 4.0]]),
SArray([[11.0, 2.0], [33.0, 3.0], [36.0, 7.0], [40.0, 11.0]])
)
single_test(
SArray([None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 1, 1, 4, 4, 9])
)
single_test(
SArray([None, [33.0, 3.0], [3.0, 4.0], [4.0, 4.0]]),
SArray([None, [33.0, 3.0], [36.0, 7.0], [40.0, 11.0]])
)
single_test(
SArray([None, [33.0, 3.0], None, [4.0, 4.0]]),
SArray([None, [33.0, 3.0], [33.0, 3.0], [37.0, 7.0]])
)
def test_cumulative_mean(self):
def single_test(src, ans):
out = src.cumulative_mean();
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_mean()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.1, 0.6, 1.1, 1.6, 2.1, 2.6, 3.1, 3.6])
)
single_test(
SArray([[11.0, 22.0], [33.0, 66.0], [4.0, 2.0], [4.0, 2.0]]),
SArray([[11.0, 22.0], [22.0, 44.0], [16.0, 30.0], [13.0, 23.0]])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 1, 1.0, 2.0, 2.0, 3.0])
)
single_test(
SArray([None, [11.0, 22.0], [33.0, 66.0], [4.0, 2.0]]),
SArray([None, [11.0, 22.0], [22.0, 44.0], [16.0, 30.0]])
)
single_test(
SArray([None, [11.0, 22.0], None, [33.0, 66.0], [4.0, 2.0]]),
SArray([None, [11.0, 22.0], [11.0, 22.0], [22.0, 44.0], [16.0, 30.0]])
)
def test_cumulative_min(self):
def single_test(src, ans):
out = src.cumulative_min();
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_min()
single_test(
SArray([0, 1, 2, 3, 4, 5, -1, 7, 8, -2, 10]),
SArray([0, 0, 0, 0, 0, 0, -1, -1, -1, -2, -2])
)
single_test(
SArray([7.1, 6.1, 3.1, 3.9, 4.1, 2.1, 2.9, 0.1]),
SArray([7.1, 6.1, 3.1, 3.1, 3.1, 2.1, 2.1, 0.1])
)
single_test(
SArray([None, 8, 6, 3, 4, None, 6, 2, 8, 9, 1]),
SArray([None, 8, 6, 3, 3, 3, 3, 2, 2, 2, 1])
)
single_test(
SArray([None, 5, None, 3, None, 10]),
SArray([None, 5, 5, 3, 3, 3])
)
def test_cumulative_max(self):
def single_test(src, ans):
out = src.cumulative_max();
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_max()
single_test(
SArray([0, 1, 0, 3, 5, 4, 1, 7, 6, 2, 10]),
SArray([0, 1, 1, 3, 5, 5, 5, 7, 7, 7, 10])
)
single_test(
SArray([2.1, 6.1, 3.1, 3.9, 2.1, 8.1, 8.9, 10.1]),
SArray([2.1, 6.1, 6.1, 6.1, 6.1, 8.1, 8.9, 10.1])
)
single_test(
SArray([None, 1, 6, 3, 4, None, 4, 2, 8, 9, 1]),
SArray([None, 1, 6, 6, 6, 6, 6, 6, 8, 9, 9])
)
single_test(
SArray([None, 2, None, 3, None, 10]),
SArray([None, 2, 2, 3, 3, 10])
)
def test_cumulative_std(self):
def single_test(src, ans):
out = src.cumulative_std();
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_std()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.5, 0.816496580927726, 1.118033988749895,
1.4142135623730951, 1.707825127659933, 2.0, 2.29128784747792,
2.581988897471611, 2.8722813232690143, 3.1622776601683795])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.0, 0.5, 0.81649658092772603, 1.1180339887498949,
1.4142135623730949, 1.707825127659933, 1.9999999999999998,
2.2912878474779195])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.5, 0.816496580927726, 1.118033988749895,
1.4142135623730951, 1.707825127659933, 2.0, 2.29128784747792,
2.581988897471611, 2.8722813232690143, 3.1622776601683795])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 0.0, 0.0, 1.0, 1.0, 1.6329931618554521])
)
def test_cumulative_var(self):
def single_test(src, ans):
out = src.cumulative_var();
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_var()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.25, 0.6666666666666666, 1.25, 2.0, 2.9166666666666665,
4.0, 5.25, 6.666666666666667, 8.25, 10.0])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray( [0.0, 0.25000000000000006, 0.6666666666666666, 1.25,
1.9999999999999996, 2.916666666666666, 3.999999999999999,
5.249999999999998])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.25, 0.6666666666666666, 1.25, 2.0, 2.9166666666666665,
4.0, 5.25, 6.666666666666667, 8.25, 10.0])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 0.0, 0.0, 1.0, 1.0, 2.6666666666666665])
)
def test_numpy_datetime64(self):
        # Make naive datetimes timezone-aware by attaching GMT(0)
expected = [i.replace(tzinfo=GMT(0.0)) \
if i is not None and i.tzinfo is None else i for i in self.datetime_data]
# A regular list
iso_str_list = [np.datetime64('2013-05-07T10:04:10Z'),
np.datetime64('1902-10-21T10:34:10Z'),
None]
sa = SArray(iso_str_list)
self.__test_equal(sa,expected,dt.datetime)
iso_str_list[2] = np.datetime64('NaT')
sa = SArray(iso_str_list)
self.__test_equal(sa,expected,dt.datetime)
# A numpy array
np_ary = np.array(iso_str_list)
sa = SArray(np_ary)
self.__test_equal(sa,expected,dt.datetime)
### Every possible type of datetime64
test_str = '1969-12-31T23:59:56Z'
available_time_units = ['h','m','s','ms','us','ns','ps','fs','as']
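        # The second and sub-second units all give 23:59:56 exactly; 'm' and
        # 'h' truncate to 23:59 and 23:00, hence the two inserts at the front.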
expected = [dt.datetime(1969,12,31,23,59,56,tzinfo=GMT(0.0)) for i in range(7)]
expected.insert(0,dt.datetime(1969,12,31,23,59,0,tzinfo=GMT(0.0)))
expected.insert(0,dt.datetime(1969,12,31,23,0,0,tzinfo=GMT(0.0)))
for i in range(len(available_time_units)):
sa = SArray([np.datetime64(test_str,available_time_units[i])])
self.__test_equal(sa,[expected[i]],dt.datetime)
test_str = '1908-06-01'
available_date_units = ['Y','M','W','D']
expected = [dt.datetime(1908,6,1,0,0,0,tzinfo=GMT(0.0)) for i in range(4)]
        expected[2] = dt.datetime(1908,5,28,0,0,0,tzinfo=GMT(0.0)) # numpy 'W' weeks are anchored to the Unix epoch (a Thursday), so this floors to 1908-05-28
expected[0] = dt.datetime(1908,1,1,0,0,0,tzinfo=GMT(0.0))
for i in range(len(available_date_units)):
sa = SArray([np.datetime64(test_str,available_date_units[i])])
self.__test_equal(sa,[expected[i]],dt.datetime)
# Daylight savings time (Just to be safe. datetime64 deals in UTC, and
# we store times in UTC by default, so this shouldn't affect anything)
sa = SArray([np.datetime64('2015-03-08T02:38:00-08')])
expected = [dt.datetime(2015,3,8,10,38,tzinfo=GMT(0.0))]
self.__test_equal(sa, expected, dt.datetime)
# timezone considerations
sa = SArray([np.datetime64('2016-01-01T05:45:00+0545')])
expected = [dt.datetime(2016,1,1,0,0,0,tzinfo=GMT(0.0))]
self.__test_equal(sa, expected, dt.datetime)
### Out of our datetime range
with self.assertRaises(TypeError):
sa = SArray([np.datetime64('1066-10-14T09:00:00Z')])
def test_pandas_timestamp(self):
iso_str_list = [pd.Timestamp('2013-05-07T10:04:10'),
pd.Timestamp('1902-10-21T10:34:10Z'),
None]
sa = SArray(iso_str_list)
self.__test_equal(sa,self.datetime_data,dt.datetime)
iso_str_list[2] = pd.tslib.NaT
sa = SArray(iso_str_list)
self.__test_equal(sa,self.datetime_data,dt.datetime)
sa = SArray([pd.Timestamp('2015-03-08T02:38:00-08')])
expected = [dt.datetime(2015,3,8,2,38,tzinfo=GMT(-8.0))]
self.__test_equal(sa, expected, dt.datetime)
sa = SArray([pd.Timestamp('2016-01-01 05:45:00', tz=GMT(5.75))])
expected = [dt.datetime(2016,1,1,5,45,0,tzinfo=GMT(5.75))]
self.__test_equal(sa, expected, dt.datetime)
def test_decimal(self):
import decimal
test_val = decimal.Decimal(3.0)
sa = SArray([test_val])
expected = [3.0]
self.__test_equal(sa, expected, float)
def test_timedelta(self):
test_val = dt.timedelta(1,1)
sa = SArray([test_val])
expected = [86401.0]
self.__test_equal(sa, expected, float)
def test_materialize(self):
        sa = SArray(range(100))
sa = sa[sa > 10]
self.assertFalse(sa.is_materialized())
sa.materialize()
self.assertTrue(sa.is_materialized())
def test_ternary(self):
lista = range(1000)
a = SArray(lista)
# identity
self.__test_equal(SArray.where(a > 10, a, a), lista, int)
# clip lower
self.__test_equal(SArray.where(a > 10, a, 10),
[i if i > 10 else 10 for i in lista], int)
# clip upper
self.__test_equal(SArray.where(a > 10, 10, a),
[10 if i > 10 else i for i in lista], int)
# constants
self.__test_equal(SArray.where(a > 10, 10, 9),
[10 if i > 10 else 9 for i in lista], int)
# constant float
self.__test_equal(SArray.where(a > 10, 10.0, 9.0),
[10.0 if i > 10 else 9.0 for i in lista], float)
# constant str
self.__test_equal(SArray.where(a > 10, "10", "9"),
["10" if i > 10 else "9" for i in lista], str)
#inconsistent types
with self.assertRaises(TypeError):
SArray.where(a > 10, 10, "9") # 10 and "9" different types
#inconsistent types
with self.assertRaises(TypeError):
SArray.where(a > 10, a, "9") # expecting an integer for "a"
# technically different types but type coercion happened
self.__test_equal(SArray.where(a > 10, a, 10.0),
[i if i > 10 else 10 for i in lista], int)
# list types
self.__test_equal(SArray.where(a > 10, [], [1], list),
[[] if i > 10 else [1] for i in lista], list)
# really the same as the above, but using an SArray in place
# of a constant in istrue. And hoping the type coercion
# will take care of [1]
b = SArray([[] for i in range(1000)])
self.__test_equal(SArray.where(a > 10, b, [1]),
[[] if i > 10 else [1] for i in lista], list)
def test_shape(self):
sa = SArray()
self.assertEqual(sa.shape, (0,))
for i in [0,1,2,10,345]:
sa = SArray(range(i))
self.assertEqual(sa.shape, (i,))
def test_random_split(self):
sa = SArray(range(10))
(train, test) = sa.random_split(0.8, seed=12423)
self.assertEqual(list(train), [0, 1, 2, 3, 5, 7, 8, 9])
self.assertEqual(list(test), [4,6])
def test_copy(self):
from copy import copy
sa = SArray(range(1000))
sa_copy = copy(sa)
assert sa is not sa_copy
assert (sa == sa_copy).all()
def test_deepcopy(self):
from copy import deepcopy
sa = SArray(range(1000))
sa_copy = deepcopy(sa)
assert sa is not sa_copy
assert (sa == sa_copy).all()
| bsd-3-clause |
paultopia/auto-sklearn | autosklearn/data/competition_data_manager.py | 5 | 16248 | # Functions performing various input/output operations for the ChaLearn AutoML challenge
# Main contributor: Arthur Pesah, August 2014
# Edits: Isabelle Guyon, October 2014
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
import numpy as np
import os
import re
import time
import scipy.sparse
try:
import autosklearn.data.competition_c_functions as competition_c_functions
competition_c_functions_is_there = True
except:
competition_c_functions_is_there = False
pass
from autosklearn.data import util as data_util
from autosklearn.data.data_manager import DataManager
from autosklearn.constants import *
def data_dense(filename, feat_type=None, verbose=False):
# The 2nd parameter makes possible a using of the 3 functions of data
# reading (data, data_sparse, data_binary_sparse) without changing
# parameters
# This code is based on scipy.io.arff.arff_load
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
descr = [(str(i), np.float32) for i in range(len(feat_type))]
def generator(row_iter, delim=','):
# Copied from scipy.io.arff.arffread
raw = next(row_iter)
while r_empty.match(raw) or r_comment.match(raw):
raw = next(row_iter)
# 'compiling' the range since it does not change
# Note, I have already tried zipping the converters and
# row elements and got slightly worse performance.
elems = list(range(len(feat_type)))
row = raw.split(delim)
# yield tuple([np.float64(row[i]) for i in elems])
yield tuple([row[i] for i in elems])
for raw in row_iter:
while r_comment.match(raw) or r_empty.match(raw):
raw = next(row_iter)
row = raw.split(delim)
# yield tuple([np.float64(row[i]) for i in elems])
yield tuple([row[i] for i in elems])
with open(filename) as fh:
a = generator(fh, delim=" ")
# No error should happen here: it is a bug otherwise
data = np.fromiter(a, descr)
data = data.view(np.float32).reshape((len(data), -1))
return data
def data_sparse(filename, feat_type):
# This function takes as argument a file representing a sparse matrix
# sparse_matrix[i][j] = "a:b" means matrix[i][a] = b
# It converts it into a numpy array, using sparse_list_to_array function,
# and returns this array
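    # For example (hypothetical input), a line "3:0.5 7:1.2" sets the 3rd and
    # 7th columns of that row (1-based in the file) to 0.5 and 1.2.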
sparse_list = sparse_file_to_sparse_list(filename)
return sparse_list_to_csr_sparse(sparse_list, len(feat_type))
def data_binary_sparse(filename, feat_type):
# This function takes as an argument a file representing a binary sparse
# matrix
# binary_sparse_matrix[i][j] = a means matrix[i][j] = 1
    # It converts it into a numpy array and returns this array.
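    # For example (hypothetical input), a line "4 9" marks the 4th and 9th
    # columns of that row (1-based in the file) as 1.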
inner_data = file_to_array(filename)
nbr_samples = len(inner_data)
# the construction is easier w/ dok_sparse
dok_sparse = scipy.sparse.dok_matrix((nbr_samples, len(feat_type)))
print ("Converting {} to dok sparse matrix".format(filename))
for row in range(nbr_samples):
for feature in inner_data[row]:
dok_sparse[row, int(feature) - 1] = 1
print ("Converting {} to csr sparse matrix".format(filename))
return dok_sparse.tocsr()
def file_to_array(filename, verbose=False):
# Converts a file to a list of list of STRING; It differs from
# np.genfromtxt in that the number of columns doesn't need to be constant
data = []
with open(filename, "r") as data_file:
if verbose:
print ("Reading {}...".format(filename))
lines = data_file.readlines()
if verbose:
print ("Converting {} to correct array...".format(filename))
data = [lines[i].strip().split() for i in range(len(lines))]
return data
def read_first_line(filename):
    # Read first line of the file
data = []
with open(filename, "r") as data_file:
line = data_file.readline()
data = line.strip().split()
return data
def sparse_file_to_sparse_list(filename, verbose=True):
# Converts a sparse data file to a sparse list, so that:
# sparse_list[i][j] = (a,b) means matrix[i][a]=b
data_file = open(filename, "r")
if verbose:
print ("Reading {}...".format(filename))
lines = data_file.readlines()
if verbose:
        print ("Converting {} to correct array".format(filename))
data = [lines[i].split(' ') for i in range(len(lines))]
if verbose:
print ("Converting {} to sparse list".format(filename))
_converter = lambda a_: (int(a_[0]), np.float32(float(a_[1])))
return [[_converter(data[i][j].rstrip().split(':'))
for j in range(len(data[i])) if data[i][j] != '\n']
for i in range(len(data))]
def sparse_list_to_csr_sparse(sparse_list, nbr_features, verbose=True):
# This function takes as argument a matrix of tuple representing a sparse
# matrix and the number of features.
# sparse_list[i][j] = (a,b) means matrix[i][a]=b
# It converts it into a scipy csr sparse matrix
nbr_samples = len(sparse_list)
# construction easier w/ dok_sparse...
dok_sparse = scipy.sparse.dok_matrix((nbr_samples, nbr_features),
dtype=np.float32)
if verbose:
print ("\tConverting sparse list to dok sparse matrix")
for row in range(nbr_samples):
for column in range(len(sparse_list[row])):
(feature, value) = sparse_list[row][column]
dok_sparse[row, feature - 1] = value
if verbose:
print ("\tConverting dok sparse matrix to csr sparse matrix")
# but csr better for shuffling data or other tricks
return dok_sparse.tocsr()
class CompetitionDataManager(DataManager):
''' This class aims at loading and saving data easily with a cache and at generating a dictionary (self.info) in which each key is a feature (e.g. : name, format, feat_num,...).
Methods defined here are :
__init__ (...)
x.__init__([(feature, value)]) -> void
Initialize the info dictionary with the tuples (feature, value) given as argument. It recognizes the type of value (int, string) and assign value to info[feature]. An unlimited number of tuple can be sent.
getInfo (...)
x.getInfo (filename) -> void
Fill the dictionary with an info file. Each line of the info file must have this format 'feature' : value
The information is obtained from the public.info file if it exists, or inferred from the data files
getInfoFromFile (...)
x.getInfoFromFile (filename) -> void
Fill the dictionary with an info file. Each line of the info file must have this format 'feature' : value
'''
def __init__(self, basename, input_dir, verbose=False, encode_labels=True):
super(CompetitionDataManager, self).__init__()
self.basename = basename
if basename in input_dir:
self.input_dir = input_dir
else:
self.input_dir = input_dir + "/" + basename + "/"
info_file = os.path.join(self.input_dir, basename + '_public.info')
self.getInfo(info_file)
self.feat_type = self.loadType(os.path.join(self.input_dir, basename + '_feat.type'), verbose=verbose)
Xtr = self.loadData(os.path.join(self.input_dir, basename + '_train.data'),
self.info['train_num'], verbose=verbose)
Ytr = self.loadLabel(os.path.join(self.input_dir, basename + '_train.solution'),
self.info['train_num'], verbose=verbose)
Xva = self.loadData(os.path.join(self.input_dir, basename + '_valid.data'),
self.info['valid_num'], verbose=verbose)
Xte = self.loadData(os.path.join(self.input_dir, basename + '_test.data'),
self.info['test_num'], verbose=verbose)
self._data['X_train'] = Xtr
self._data['Y_train'] = Ytr
self._data['X_valid'] = Xva
self._data['X_test'] = Xte
p = os.path.join(self.input_dir, basename + '_valid.solution')
if os.path.exists(p):
try:
self._data['Y_valid'] = self.loadLabel(p,
self.info['valid_num'], verbose=verbose)
except (IOError, OSError):
pass
p = os.path.join(self.input_dir, basename + '_test.solution')
if os.path.exists(p):
try:
                self._data['Y_test'] = self.loadLabel(p,
self.info['test_num'], verbose=verbose)
except (IOError, OSError) as e:
pass
if encode_labels:
self.perform1HotEncoding()
def loadData (self, filename, num_points, verbose=True):
''' Get the data from a text file in one of 3 formats: matrix, sparse, binary_sparse'''
if verbose: print("========= Reading " + filename)
start = time.time()
if 'format' not in self.info:
self.getFormatData(filename)
if competition_c_functions_is_there:
data_func = {'dense': competition_c_functions.read_dense_file,
'sparse': competition_c_functions.read_sparse_file,
'sparse_binary': competition_c_functions.read_sparse_binary_file}
data = data_func[self.info['format']](filename, num_points,
self.info['feat_num'])
if scipy.sparse.issparse(data):
if not np.all(data.indices >= 0):
raise ValueError("Sparse data must be 1-indexed, "
"not 0-indexed.")
else:
data_func = {'dense': data_dense,
'sparse': data_sparse,
'sparse_binary': data_binary_sparse}
data = data_func[self.info['format']](filename, self.feat_type)
end = time.time()
if verbose: print( "[+] Success in %5.2f sec" % (end - start))
return data
def loadLabel (self, filename, num_points, verbose=True):
''' Get the solution/truth values'''
if verbose: print("========= Reading " + filename)
start = time.time()
# IG: Here change to accommodate the new multiclass label format
if competition_c_functions_is_there:
if self.info['task'] == MULTILABEL_CLASSIFICATION:
# cast into ints
label = (competition_c_functions.read_dense_file_unknown_width(
filename, num_points)).astype(np.int)
elif self.info['task'] == MULTICLASS_CLASSIFICATION:
label = competition_c_functions.read_dense_file_unknown_width(
filename, num_points)
# read the class from the only non zero entry in each line!
# should be ints right away
label = np.where(label != 0)[1];
else:
label = competition_c_functions.read_dense_file_unknown_width(
filename, num_points)
else:
if self.info['task'] == MULTILABEL_CLASSIFICATION:
                label = data_util.data(filename)
            elif self.info['task'] == MULTICLASS_CLASSIFICATION:
                label = data_util.convert_to_num(data_util.data(filename))
else:
label = np.ravel(data_util.data(filename)) # get a column vector
end = time.time()
if verbose: print( "[+] Success in %5.2f sec" % (end - start))
return label
def loadType (self, filename, verbose=True):
''' Get the variable types'''
if verbose: print("========= Reading " + filename)
start = time.time()
type_list = []
if os.path.isfile(filename):
if competition_c_functions_is_there:
type_list = competition_c_functions.file_to_array(filename,
verbose=False)
else:
type_list = file_to_array(filename, verbose=False)
else:
n=self.info['feat_num']
type_list = [self.info['feat_type']]*n
type_list = np.array(type_list).ravel()
end = time.time()
if verbose: print( "[+] Success in %5.2f sec" % (end - start))
return type_list
def getInfo (self, filename, verbose=True):
''' Get all information {attribute = value} pairs from the filename (public.info file),
if it exists, otherwise, output default values'''
if filename==None:
basename = self.basename
input_dir = self.input_dir
else:
# Split away the _public.info (anyway, I don't know why its
# there... the dataset name is known from the call)
basename = "_".join(os.path.basename(filename).split('_')[:-1])
input_dir = os.path.dirname(filename)
if os.path.exists(filename):
self.getInfoFromFile (filename)
            print ("Info file found : " + os.path.abspath(filename))
# Finds the data format ('dense', 'sparse', or 'sparse_binary')
self.getFormatData(os.path.join(input_dir, basename + '_train.data'))
else:
raise NotImplementedError("The user must always provide an info "
"file.")
self.info['task'] = STRING_TO_TASK_TYPES[self.info['task']]
return self.info
def getInfoFromFile (self, filename):
''' Get all information {attribute = value} pairs from the public.info file'''
with open (filename, "r") as info_file:
lines = info_file.readlines()
features_list = list(map(lambda x: tuple(x.strip("\'").split(" = ")), lines))
for (key, value) in features_list:
self.info[key] = value.rstrip().strip("'").strip(' ')
if self.info[key].isdigit(): # if we have a number, we want it to be an integer
self.info[key] = int(self.info[key])
return self.info
def getFormatData(self,filename):
''' Get the data format directly from the data file (in case we do not have an info file)'''
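        # Heuristic: a ':' in the first value means 'sparse'; rows of varying
        # length without ':' mean 'sparse_binary'; otherwise 'dense'.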
if 'format' in self.info.keys():
return self.info['format']
if 'is_sparse' in self.info.keys():
if self.info['is_sparse'] == 0:
self.info['format'] = 'dense'
else:
if competition_c_functions_is_there:
data = competition_c_functions.read_first_line(filename)
else:
data = data_util.read_first_line(filename)
if ':' in data[0]:
self.info['format'] = 'sparse'
else:
self.info['format'] = 'sparse_binary'
else:
if competition_c_functions_is_there:
data = competition_c_functions.file_to_array(filename)
else:
data = data_util.file_to_array(filename)
if ':' in data[0][0]:
self.info['is_sparse'] = 1
self.info['format'] = 'sparse'
else:
nbr_columns = len(data[0])
for row in range (len(data)):
if len(data[row]) != nbr_columns:
self.info['format'] = 'sparse_binary'
if 'format' not in self.info.keys():
self.info['format'] = 'dense'
self.info['is_sparse'] = 0
return self.info['format']
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/api/test_types.py | 15 | 3674 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
import pandas
from pandas.core import common as com
from pandas.api import types
from pandas.util import testing as tm
from .test_api import Base
class TestTypes(Base):
allowed = ['is_bool', 'is_bool_dtype',
'is_categorical', 'is_categorical_dtype', 'is_complex',
'is_complex_dtype', 'is_datetime64_any_dtype',
'is_datetime64_dtype', 'is_datetime64_ns_dtype',
'is_datetime64tz_dtype', 'is_datetimetz', 'is_dtype_equal',
'is_extension_type', 'is_float', 'is_float_dtype',
'is_int64_dtype', 'is_integer',
'is_integer_dtype', 'is_number', 'is_numeric_dtype',
'is_object_dtype', 'is_scalar', 'is_sparse',
'is_string_dtype', 'is_signed_integer_dtype',
'is_timedelta64_dtype', 'is_timedelta64_ns_dtype',
'is_unsigned_integer_dtype', 'is_period',
'is_period_dtype', 'is_interval', 'is_interval_dtype',
'is_re', 'is_re_compilable',
'is_dict_like', 'is_iterator', 'is_file_like',
'is_list_like', 'is_hashable',
'is_named_tuple',
'pandas_dtype', 'union_categoricals', 'infer_dtype']
deprecated = ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']
dtypes = ['CategoricalDtype', 'DatetimeTZDtype',
'PeriodDtype', 'IntervalDtype']
def test_types(self):
self.check(types, self.allowed + self.dtypes + self.deprecated)
def check_deprecation(self, fold, fnew):
with tm.assert_produces_warning(DeprecationWarning):
try:
result = fold('foo')
expected = fnew('foo')
assert result == expected
except TypeError:
pytest.raises(TypeError, lambda: fnew('foo'))
except AttributeError:
pytest.raises(AttributeError, lambda: fnew('foo'))
def test_deprecation_core_common(self):
# test that we are in fact deprecating
# the pandas.core.common introspectors
for t in self.allowed:
self.check_deprecation(getattr(com, t), getattr(types, t))
def test_deprecation_core_common_array_equivalent(self):
with tm.assert_produces_warning(DeprecationWarning):
com.array_equivalent(np.array([1, 2]), np.array([1, 2]))
def test_deprecation_core_common_moved(self):
# these are in pandas.core.dtypes.common
l = ['is_datetime_arraylike',
'is_datetime_or_timedelta_dtype',
'is_datetimelike',
'is_datetimelike_v_numeric',
'is_datetimelike_v_object',
'is_datetimetz',
'is_int_or_datetime_dtype',
'is_period_arraylike',
'is_string_like',
'is_string_like_dtype']
from pandas.core.dtypes import common as c
for t in l:
self.check_deprecation(getattr(com, t), getattr(c, t))
def test_removed_from_core_common(self):
for t in ['is_null_datelike_scalar',
'ensure_float']:
pytest.raises(AttributeError, lambda: getattr(com, t))
def test_deprecated_from_api_types(self):
for t in self.deprecated:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(types, t)(1)
def test_moved_infer_dtype():
with catch_warnings(record=True):
e = pandas.lib.infer_dtype('foo')
assert e is not None
| mit |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/test_common.py | 3 | 4870 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import Series, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
import pandas.util.testing as tm
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assert_raises_regex(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_get_callable_name():
from functools import partial
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2), (2, 3), (3, 4)]
result = list(com.iterpairs(data))
assert (result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert (result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert ((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert ([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert (a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert (com._random_state(state2).uniform() ==
npr.RandomState(10).uniform())
# check with no arg random state
assert com._random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com._random_state('test')
with pytest.raises(ValueError):
com._random_state(5.5)
def test_maybe_match_name():
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='x'))
assert (matched == 'x')
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='y'))
assert (matched is None)
matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), [2])
assert (matched == 'x')
matched = com._maybe_match_name([1], Series([2], name='y'))
assert (matched == 'y')
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com._dict_compat(data_datetime64) == expected)
assert (com._dict_compat(expected) == expected)
assert (com._dict_compat(data_unchanged) == data_unchanged)
| mit |
adybbroe/atrain_match | atrain_match/reshaped_files_scr/plot_ctth_boxplots_mlvl2_temperature_pressure_height.py | 1 | 16002 | """Read all matched data and make some plotting
"""
import os
import re
from glob import glob
import numpy as np
from matchobject_io import (readCaliopImagerMatchObj,
CalipsoImagerTrackObject)
from plot_kuipers_on_area_util import (PerformancePlottingObject,
ppsMatch_Imager_CalipsoObject)
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
from utils.get_flag_info import get_calipso_clouds_of_type_i
from utils.get_flag_info import (get_semi_opaque_info_pps2014,
get_calipso_high_clouds,
get_calipso_medium_clouds,
get_calipso_low_clouds)
from my_dir import ADIR
def make_boxplot(caObj, name, month="xx", modis_lvl2=False, use_m2_pix=True):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
height_c = 1000*caObj.calipso.all_arrays['layer_top_altitude'][:,0]
cloud_elevation = 1000*caObj.calipso.all_arrays['layer_top_altitude'][:,0]-caObj.calipso.all_arrays['elevation']
if modis_lvl2:
height_imager = caObj.modis.all_arrays['height']
else:
height_imager = caObj.imager.all_arrays['imager_ctth_m_above_seasurface']
if height_imager is None:
height_imager = caObj.imager.all_arrays['ctth_height']+caObj.calipso.all_arrays['elevation']
use = np.logical_and(height_imager >-1,
height_c>=0)
use = np.logical_and(height_imager <45000,use)
USE_ONLY_PIXELS_WHERE_PPS_AND_MODIS_C6_HAVE_VALUES=use_m2_pix
if USE_ONLY_PIXELS_WHERE_PPS_AND_MODIS_C6_HAVE_VALUES:
height_mlvl2 = caObj.modis.all_arrays['height']
height_pps = caObj.imager.all_arrays['imager_ctth_m_above_seasurface']
use = np.logical_and(use, height_mlvl2>-1)
use = np.logical_and(use, height_mlvl2<45000)
use = np.logical_and(use, height_pps>-1)
use = np.logical_and(use, height_pps<45000)
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = height_imager - height_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
MAE = np.mean(abias[c_all])
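    # MAE over low, medium and high clouds, excluding very thin high clouds
    # (optical depth < 0.1) from the headline score.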
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-500,500, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-1000,1000, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-1500,1500, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),2000,15000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-15000, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), -10*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-14000,8000)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title("%s MAE = %3.0f"%(name,MAE))
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_%s_5_95_filt.png"%(name))
elevation_zero = np.logical_and(use,caObj.calipso.all_arrays['elevation']>5000)
low_clouds = height_c<2500
medium_clouds = np.logical_and(height_c>=2500, height_c<=5000)
high_clouds = height_c>5000
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=50)
ax.fill_between(np.arange(0,8),-500,500, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-1000,1000, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-1500,1500, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),2000,15000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-15000, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), -10*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high], bias[elevation_zero]],whis=[5, 95],sym='',
labels=["low <2.5km","medium","high>5km", "ground>5km"],
showmeans=True, patch_artist=True)
ax.set_ylim(-8000,8000)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title("Calipso %s \nHeight bias comparison MAE= %3.0f"%(name, MAE))
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_hkm_%s_5_95_filt.png"%(name))
def make_boxplot_temperature(caObj, name, modis_lvl2=False):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
temp_c = caObj.calipso.all_arrays['layer_top_temperature'][:,0] +273.15
if modis_lvl2:
temp_pps = caObj.modis.all_arrays['temperature']
else:
temp_pps = caObj.imager.all_arrays['ctth_temperature']
if modis_lvl2:
height_pps = caObj.modis.all_arrays['height']
else:
height_pps = caObj.imager.all_arrays['ctth_height']
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
use = np.logical_and(temp_pps >100,
caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
use = np.logical_and(height_pps <45000,use)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = temp_pps - temp_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-2.5,2.5, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-5,5, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-7.5,7.5, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),10,150, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-20,-10, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,-1,1,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*20 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-20,100)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title(name)
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_temperature_%s_5_95_filt.png"%(name))
def make_boxplot_pressure(caObj, name, modis_lvl2=False):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
pressure_c = caObj.calipso.all_arrays['layer_top_pressure'][:,0]
if modis_lvl2:
pressure_pps = caObj.modis.all_arrays['pressure']
else:
pressure_pps = 0.01*caObj.imager.all_arrays['ctth_pressure']
if modis_lvl2:
height_pps = caObj.modis.all_arrays['height']
else:
height_pps = caObj.imager.all_arrays['ctth_height']
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
use = np.logical_and(pressure_pps >0,
caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = pressure_pps - pressure_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-50,50, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-100,100, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-150,150, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),200,2000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-200, facecolor='red', alpha=0.2)
for y_val in [-6,-4,-2,2,4,6,8,-8]:
plt.plot(np.arange(0,8), y_val*100 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-1000,800)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title(name)
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_pressure_%s_5_95_filt.png"%(name))
def investigate_nn_ctth_modis_lvl2():
#november
ROOT_DIR_MODIS_nn_imager = (
ADIR + "/DATA_MISC/reshaped_files/"
"global_modis_14th_created20170324/Reshaped_Files_merged/eos2/1km/2010/%s/*h5")
ROOT_DIR_MODIS_old = (
ADIR + "/DATA_MISC/reshaped_files/"
"global_modis_14th_created20161108/Reshaped_Files/merged/*%s*h5")
for month in [ "06", "09", "01"]:
for ROOT_DIR, name in zip(
[ROOT_DIR_MODIS_nn_imager,
ROOT_DIR_MODIS_nn_imager,
ROOT_DIR_MODIS_old],
["modis_nnIMAGER",
"modis_lvl2_C6",
"modis_CTTHold"]):
name = "%s_%s"%(name, month)
print ROOT_DIR
files = glob(ROOT_DIR%(month))
caObj = CalipsoImagerTrackObject()
for filename in files:
#print filename
caObj += readCaliopImagerMatchObj(filename)
modis_lvl2 = False
if "modis_lvl2" in name:
modis_lvl2 = True
use_m2_pix=True
if "old" in name:
use_m2_pix=False
make_boxplot(caObj, name, month = month, modis_lvl2=modis_lvl2, use_m2_pix=use_m2_pix)
make_boxplot_pressure(caObj, name, modis_lvl2=modis_lvl2)
make_boxplot_temperature(caObj, name, modis_lvl2=modis_lvl2)
if __name__ == "__main__":
investigate_nn_ctth_modis_lvl2()
| gpl-3.0 |
afruizc/microsoft_malware_challenge | src/models/first_model/get_conf_matrix.py | 2 | 2842 | """
This is a script that is used to generate a confussion matrix for
a classification method. This uses 10-k cross_validation with in
order to provide sensible resutls and not overfit.
"""
__author__ = "Andres Ruiz"
__license__ = "Apache"
__email__ = "afruizc __thingy__ cs unm edu"
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix, accuracy_score, log_loss
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import svm_bow
def plot_confusion_matrix(cm, title='Confusion matrix', normalized=True,
cmap=plt.cm.Oranges, save_file=""):
"""
    Displays the confusion matrix indicated by `cm`. If argument
    `normalized` is True, then the matrix is normalized. Optionally
    the image can be saved to a file.
Arguments:
----------
`cm`: The confusion matrix to be displayed.
`title`: The title for the window.
`normalized`: If True, normalizes the matrix before showing it.
`cmap`: Colormap to use.
    `save_file`: If a non-empty string, the resulting image is
        stored in that file.
"""
if normalized:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if save_file:
plt.savefig(save_file)
def get_indices(data, indices):
result = []
for i in indices:
result.append(data[i])
return result
def main():
e = svm_bow.Executor()
e.load_data()
e.config_model()
fold = KFold(len(e.train['data']), n_folds=10)
conf_mat_avg = np.zeros((9, 9))
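    # The Microsoft malware challenge has 9 classes, hence the 9x9 matrix.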
c = 0
for train, test in fold:
X_train = get_indices(e.train['data'], train)
X_test = get_indices(e.train['data'], test)
y_train = get_indices(e.train['target'], train)
y_test = get_indices(e.train['target'], test)
c += 1
print("Fitting run {}.".format(c))
model = e.param_tunning.fit(X_train, y_train)
print("Predicting...")
y_pred = model.predict(X_test)
y_pred_prob = model.predict_proba(X_test)
conf_matrix = confusion_matrix(y_test, y_pred)
        accuracy = accuracy_score(y_test, y_pred)
loss = log_loss(y_test, y_pred_prob)
plot_confusion_matrix(conf_matrix,
save_file='fold_{}.png'.format(c))
np.savetxt('conf_matrix_fold{}'.format(c), conf_matrix)
        print("Fold %d. Accuracy: %lf Loss: %lf" % (c, accuracy, loss))
conf_mat_avg += conf_matrix
np.savetxt('conf_matrix.txt', conf_mat_avg)
conf_mat_avg /= 10.0
plot_confusion_matrix(conf_mat_avg, save_file='final_cm.png')
if __name__ == '__main__':
main()
| apache-2.0 |
carrillo/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
hmtai6/universe_NeonRace-v0 | DQN_breakout/DQN.py | 1 | 9601 | import argparse
import logging
import sys
import gc
import cv2
import matplotlib.pyplot as plt
import gym
import universe # register the universe environments
from universe import wrappers
from collections import deque
from skimage.color import rgb2gray
from skimage.transform import resize
import numpy as np
import tensorflow as tf
import time
import gym, time, random, threading
from keras.models import *
from keras.layers import *
from keras import backend as K
from keras.models import load_model
LEARNING_RATE = 0.005
MOMENTUM = 0.2
MIN_GRAD = 0.0001
ENV_NAME = 'break_out'
SAVE_SUMMARY_PATH = './logs'
SAVE_NETWORK_PATH = './network'
LOAD_NETWORK = False
INITIAL_REPLAY_SIZE = 200000 # Nb steps for memory, before training
NUM_REPLAY_MEMORY = 400000 # Number of replay memory the agent uses for training
TRAIN_INTERVAL = 1000
GAMMA = 0.99 # Discount factor
STATE_LENGTH = 4 # Number of most recent frames to produce the input to the network
FRAME_WIDTH = 84
FRAME_HEIGHT = 84
# Assumed values (typical DQN settings) for constants referenced further down.
BATCH_SIZE = 32
TARGET_UPDATE_INTERVAL = 10000
SAVE_INTERVAL = 300000
class DQN:
def __init__(self, input_shape, nb_actions,
init_epsilon=1.0,
final_epsilon=0.1,
exploration_steps=1000000):
self.input_shape = input_shape
self.nb_actions = nb_actions
self.final_epsilon = final_epsilon
self.epsilon = init_epsilon
self.epsilon_step = (init_epsilon - final_epsilon) / exploration_steps
self.t = 0
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
# create replay memory
self.replay_memory = deque()
# create network
self.state, self.q_vals, self.network = self._build_network()
q_network_weights = self.network.trainable_weights
# create target network
self.state_t, self.q_vals_t, self.network_t = self._build_network()
q_network_weights_t = self.network_t.trainable_weights
# define copy operation
self.update_target_network = [q_network_weights_t[i].assign(q_network_weights[i]) for i in range(len(q_network_weights_t))]
# Define loss and gradient update operation
self.a, self.y, self.loss, self.grads_update = self._build_train_op(q_network_weights)
self.sess = tf.InteractiveSession()
self.saver = tf.train.Saver(q_network_weights)
self.summary_placeholders, self.update_ops, self.summary_op = self._build_summary()
self.summary_writer = tf.summary.FileWriter(SAVE_SUMMARY_PATH, self.sess.graph)
if not os.path.exists(SAVE_NETWORK_PATH):
os.makedirs(SAVE_NETWORK_PATH)
self.sess.run(tf.global_variables_initializer())
        if LOAD_NETWORK:
            self.load_network()
self.sess.run(self.update_target_network)
def _build_network(self):
model = Sequential()
model.add(Conv2D(32, 8, strides=(4, 4), activation='relu', input_shape=[self.input_shape[0], self.input_shape[1], self.input_shape[2]]))
model.add(Conv2D(64, 4, strides=(2, 2), activation='relu'))
model.add(Conv2D(64, 3, strides=(1, 1), activation='relu'))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(self.nb_actions))
state = tf.placeholder(tf.float32, [None, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
q_vals = model(state)
return state, q_vals, model
def _build_train_op(self, network_weights):
a = tf.placeholder(tf.int64, [None])
y = tf.placeholder(tf.float32, [None])
# convert into to one hot
a_one_hot = tf.one_hot(a, self.nb_actions, 1.0, 0.)
q_value = tf.reduce_sum(tf.multiply(self.q_vals, a_one_hot), reduction_indices=1)
# clip the error
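        # The clip below yields the Huber loss: quadratic for |error| <= 1,
        # linear beyond that, which keeps gradients bounded.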
error = tf.abs(y - q_value)
clipped = tf.clip_by_value(error, 0.0, 1.0)
linear = error - clipped
loss = tf.reduce_mean(0.5 * tf.square(clipped) + linear)
rms_optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, momentum=MOMENTUM, epsilon=MIN_GRAD)
grads_update = rms_optimizer.minimize(loss, var_list=network_weights)
return a, y, loss, grads_update
def get_initial_state(self, observation, last_observation):
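        # Pixel-wise max over the two most recent frames removes Atari sprite
        # flicker; the result is resized to 84x84 grayscale and stacked
        # STATE_LENGTH times to form the initial state.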
processed_observation = np.maximum(observation, last_observation)
processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
state = [processed_observation for _ in range(STATE_LENGTH)]
return np.stack(state, axis=0)
def _build_summary(self):
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
episode_total_reward = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Total Reward/Episode', episode_total_reward)
episode_avg_max_q = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Average Max Q/Episode', episode_avg_max_q)
episode_duration = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Duration/Episode', episode_duration)
episode_avg_loss = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Average Loss/Episode', episode_avg_loss)
summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration, episode_avg_loss]
summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
summary_op = tf.summary.merge_all()
return summary_placeholders, update_ops, summary_op
def load_network(self):
checkpoint = tf.train.get_checkpoint_state(SAVE_NETWORK_PATH)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
print('Successfully loaded: ' + checkpoint.model_checkpoint_path)
else:
print('Training new network...')
def get_action_test(self, state):
        return np.argmax(self.q_vals.eval(feed_dict={self.state: [np.float32(state / 255.0)]}))
def get_action(self, state):
if self.epsilon >= random.random() or self.t < INITIAL_REPLAY_SIZE:
action = random.randrange(self.nb_actions)
else:
            action = np.argmax(self.q_vals.eval(feed_dict={self.state: [np.float32(state / 255.0)]}))
# Anneal epsilon linearly over time
if self.epsilon > self.final_epsilon and self.t >= INITIAL_REPLAY_SIZE:
self.epsilon -= self.epsilon_step
return action
def _train(self):
s_batch = []
a_batch = []
r_batch = []
s__batch = []
t_batch = []
y_batch = []
# sample from memory
minibatch = random.sample(self.replay_memory, BATCH_SIZE)
for data in minibatch:
s_batch.append(data[0])
a_batch.append(data[1])
r_batch.append(data[2])
s__batch.append(data[3])
t_batch.append(data[4])
# bool to int
t_batch = np.array(t_batch) + 0
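        # Double-DQN style target: the online network selects the greedy next
        # action, while the target network supplies its Q-value estimate.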
        next_actions_batch = np.argmax(self.q_vals.eval(feed_dict={self.state: np.float32(np.array(s__batch) / 255.0)}), axis=1)
        target_q_values_batch = self.q_vals_t.eval(feed_dict={self.state_t: np.float32(np.array(s__batch) / 255.0)})
for i in range(len(minibatch)):
y_batch.append(r_batch[i] + (1 - t_batch[i]) * GAMMA * target_q_values_batch[i][next_actions_batch[i]])
loss, _ = self.sess.run([self.loss, self.grads_update], feed_dict={
            self.state: np.float32(np.array(s_batch) / 255.0),
self.a: a_batch,
self.y: y_batch
})
self.total_loss += loss
def add_memory(self, s, a, r, t, s_):
next_state = np.append(s[1:, :, :], s_, axis=0)
# clip reward into -1,1
reward = np.clip(r, -1, 1)
# add into replay memory
        self.replay_memory.append((s, a, reward, next_state, t))
if len(self.replay_memory) > NUM_REPLAY_MEMORY :
self.replay_memory.popleft()
if self.t > INITIAL_REPLAY_SIZE:
# train network
if self.t % TRAIN_INTERVAL == 0:
self._train()
# update target network
if self.t % TARGET_UPDATE_INTERVAL == 0:
self.sess.run(self.update_target_network)
# save network
if self.t % SAVE_INTERVAL == 0:
s_path = self.saver.save(self.sess, SAVE_NETWORK_PATH, global_step=self.t)
print('saved network')
self.total_reward += reward
        self.total_q_max += np.max(self.q_vals.eval(feed_dict={self.state: [np.float32(s / 255.0)]}))
self.duration += 1
if t:
# write summary
if self.t >= INITIAL_REPLAY_SIZE:
stats = [self.total_reward, self.total_q_max/float(self.duration),
self.duration, self.total_loss/ (float(self.duration)/ float(TRAIN_INTERVAL))]
for i in range(len(stats)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(stats[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.episode + 1)
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode += 1
self.t += 1
return next_state | mit |
trungnt13/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
IamJeffG/geopandas | geopandas/io/tests/test_io.py | 1 | 1794 | from __future__ import absolute_import
import fiona
from geopandas import read_postgis, read_file
from geopandas.tests.util import download_nybb, connect, create_db, \
PANDAS_NEW_SQL_API, unittest, validate_boro_df
class TestIO(unittest.TestCase):
def setUp(self):
nybb_filename, nybb_zip_path = download_nybb()
vfs = 'zip://' + nybb_filename
self.df = read_file(nybb_zip_path, vfs=vfs)
with fiona.open(nybb_zip_path, vfs=vfs) as f:
self.crs = f.crs
def test_read_postgis_default(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con)
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_read_postgis_custom_geom_col(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = """SELECT
borocode, boroname, shape_leng, shape_area,
geom AS __geometry__
FROM nybb;"""
df = read_postgis(sql, con, geom_col='__geometry__')
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_read_file(self):
df = self.df.rename(columns=lambda x: x.lower())
validate_boro_df(self, df)
self.assert_(df.crs == self.crs)
| bsd-3-clause |
libAtoms/matscipy | examples/electrochemistry/pnp_batch/cell_1d/stern_layer_sweep/pnp_plot.py | 2 | 6741 | # positional args
# datadir, figfile, param, param_label
import os.path, re, sys
import numpy as np
from glob import glob
from cycler import cycler
from itertools import cycle
from itertools import groupby
import matplotlib.pyplot as plt
# Ensure variable is defined
try:
datadir
except NameError:
try:
datadir = sys.argv[1]
except:
datadir = 'data'
try:
figfile
except NameError:
try:
figfile = sys.argv[2]
except:
figfile = 'fig.png'
try:
param
except NameError:
try:
param = sys.argv[3]
except:
param = 'c'
try:
    param_label
except NameError:
try:
param_label = sys.argv[4]
except:
param_label = 'c (\mathrm{mM})'
try:
glob_pattern
except NameError:
glob_pattern = os.path.join(datadir, 'NaCl*.txt')
def right_align_legend(leg):
hp = leg._legend_box.get_children()[1]
for vp in hp.get_children():
for row in vp.get_children():
row.set_width(100) # need to adapt this manually
row.mode= "expand"
row.align="right"
# sort file names as normal humans expect
# https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
scientific_number_regex = '([-+]?[\d]+\.?[\d]*(?:[Ee][-+]?[\d]+)?)'
def alpha_num_order(x):
"""Sort the given iterable in the way that humans expect."""
def convert(text):
try:
ret = float(text) # if text.isdigit() else text
except:
ret = text
return ret
return [ convert(c) for c in re.split(scientific_number_regex, x) ]
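# Illustrative example (file names assumed, not taken from the data set):
#   sorted(['NaCl_c_10.txt', 'NaCl_c_2.txt'], key=alpha_num_order)
#   -> ['NaCl_c_2.txt', 'NaCl_c_10.txt']   (numeric, not lexicographic, order)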
dat_files = sorted(glob(glob_pattern),key=alpha_num_order)
N = len(dat_files) # number of data sets
M = 2 # number of species
# matplotlib settings
SMALL_SIZE = 8
MEDIUM_SIZE = 12
BIGGER_SIZE = 16
# plt.rc('axes', prop_cycle=default_cycler)
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure titlex
plt.rcParams["figure.figsize"] = (16,10) # the standard figure size
plt.rcParams["lines.linewidth"] = 3
plt.rcParams["lines.markersize"] = 14
plt.rcParams["lines.markeredgewidth"]=1
# line styles
# https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
# linestyle_str = [
# ('solid', 'solid'), # Same as (0, ()) or '-'
# ('dotted', 'dotted'), # Same as (0, (1, 1)) or '.'
# ('dashed', 'dashed'), # Same as '--'
# ('dashdot', 'dashdot')] # Same as '-.'
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
# color maps for potential and concentration plots
cmap_u = plt.get_cmap('Reds')
cmap_c = [plt.get_cmap('Oranges'), plt.get_cmap('Blues')]
# general line style cycler
line_cycler = cycler( linestyle = [ s for _,s in linestyle_tuple ] )
# potential anc concentration cyclers
u_cycler = cycler( color = cmap_u( np.linspace(0.4,0.8,N) ) )
u_cycler = len(line_cycler)*u_cycler + len(u_cycler)*line_cycler
c_cyclers = [ cycler( color = cmap( np.linspace(0.4,0.8,N) ) ) for cmap in cmap_c ]
c_cyclers = [ len(line_cycler)*c_cycler + len(c_cycler)*line_cycler for c_cycler in c_cyclers ]
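# Note: for cyclers, `n * cyc` concatenates `cyc` with itself n times and `+`
# zips two equal-length cyclers, so every colour gets paired with a line style.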
# https://matplotlib.org/3.1.1/tutorials/intermediate/constrainedlayout_guide.html
fig, (ax1,ax2,ax3) = plt.subplots(
nrows=1, ncols=3, figsize=[24,7], constrained_layout=True)
ax1.set_xlabel('z (nm)')
ax1.set_ylabel('potential (V)')
ax2.set_xlabel('z (nm)')
ax2.set_ylabel('concentration (mM)')
ax3.set_xlabel('z (nm)')
ax3.set_ylabel('concentration (mM)')
# ax1.axvline(x=pnp.lambda_D()*1e9, label='Debye Length', color='grey', linestyle=':')
species_label = [
'$[\mathrm{Na}^+], ' + param_label + '$',
'$[\mathrm{Cl}^-], ' + param_label + '$']
c_regex = re.compile(r'{}_{}'.format(param,scientific_number_regex))
c_graph_handles = [ [] for _ in range(M) ]
for f, u_style, c_styles in zip(dat_files,u_cycler,zip(*c_cyclers)):
print("Processing {:s}".format(f))
# extract nominal concentration from file name
nominal_c = float( c_regex.search(f).group(1) )
dat = np.loadtxt(f,unpack=True)
x = dat[0,:]
u = dat[1,:]
c = dat[2:,:]
c_label = '{:> 4.2g}'.format(nominal_c)
# potential
ax1.plot(x*1e9, u, marker=None, label=c_label, linewidth=1, **u_style)
for i in range(c.shape[0]):
# concentration
ax2.plot(x*1e9, c[i], marker='',
label=c_label, linewidth=2, **c_styles[i])
# semilog concentration
c_graph_handles[i].extend( ax3.semilogy(x*1e9, c[i], marker='',
label=c_label, linewidth=2, **c_styles[i]) )
# legend placement
# https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
u_legend = ax1.legend(loc='center right', title='potential, ${}$'.format(param_label), bbox_to_anchor=(-0.2,0.5) )
first_c_legend = ax3.legend(handles=c_graph_handles[0], title=species_label[0], loc='upper left', bbox_to_anchor=(1.00, 1.02) )
second_c_legend = ax3.legend(handles=c_graph_handles[1], title=species_label[1], loc='lower left', bbox_to_anchor=(1.00,-0.02) )
ax3.add_artist(first_c_legend) # add automatically removed first legend again
c_legends = [ first_c_legend, second_c_legend ]
legends = [ u_legend, *c_legends ]
for l in legends:
right_align_legend(l)
# https://matplotlib.org/3.1.1/tutorials/intermediate/constrainedlayout_guide.html
for l in legends:
l.set_in_layout(False)
# trigger a draw so that constrained_layout is executed once
# before we turn it off when printing....
fig.canvas.draw()
# we want the legend included in the bbox_inches='tight' calcs.
for l in legends:
l.set_in_layout(True)
# we don't want the layout to change at this point.
fig.set_constrained_layout(False)
# fig.tight_layout(pad=3.0, w_pad=2.0, h_pad=1.0)
# plt.show()
fig.savefig(figfile, bbox_inches='tight', dpi=100)
| gpl-2.0 |
jakobworldpeace/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 33 | 17877 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss']
# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = ["adjusted_rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), cluster_module.adjusted_rand_score)
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/kernel_approximation.py | 4 | 23032 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.kernel_approximation import RBFSampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> rbf_feature = RBFSampler(gamma=1, random_state=1)
>>> X_features = rbf_feature.fit_transform(X)
>>> clf = SGDClassifier(max_iter=5)
>>> clf.fit(X_features, y)
... # doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=5,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=None, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> clf.score(X_features, y)
1.0
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
... n_components=10,
... random_state=0)
>>> X_features = chi2_feature.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=10)
>>> clf.fit(X_features, y)
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=10,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=None, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> clf.score(X_features, y)
1.0
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than"
" -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
    at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.kernel_approximation import AdditiveChi2Sampler
>>> X, y = load_digits(return_X_y=True)
>>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
>>> X_transformed = chi2sampler.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=5, random_state=0)
>>> clf.fit(X_transformed, y)
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=5,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=0, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> clf.score(X_transformed, y) # doctest: +ELLIPSIS
0.9543...
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set the parameters
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
Whether the return value is an array of sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
coef0 : float, default=None
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
degree : float, default=None
Degree of the polynomial kernel. Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
Examples
--------
>>> from sklearn import datasets, svm
>>> from sklearn.kernel_approximation import Nystroem
>>> digits = datasets.load_digits(n_class=9)
>>> data = digits.data / 16.
>>> clf = svm.LinearSVC()
>>> feature_map_nystroem = Nystroem(gamma=.2,
... random_state=1,
... n_components=300)
>>> data_transformed = feature_map_nystroem.fit_transform(data)
>>> clf.fit(data_transformed, digits.target)
... # doctest: +NORMALIZE_WHITESPACE
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> clf.score(data_transformed, digits.target) # doctest: +ELLIPSIS
0.9987...
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=None, degree=None,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
        # (pseudo-)inverse square root of the kernel matrix on the basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
for param in (KERNEL_PARAMS[self.kernel]):
if getattr(self, param) is not None:
params[param] = getattr(self, param)
else:
if (self.gamma is not None or
self.coef0 is not None or
self.degree is not None):
warnings.warn(
"Passing gamma, coef0 or degree to Nystroem when using a"
" callable kernel is deprecated in version 0.19 and will"
" raise an error in 0.21, as they are ignored. Use "
"kernel_params instead.", DeprecationWarning)
return params
| bsd-3-clause |
evanthebouncy/nnhmm | uai_network/draw.py | 7 | 2929 | import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure
from data import *
FIG = plt.figure()
def draw_coord(coord, name, lab=[1.0, 0.0]):
color = 1.0 if lab[0] > lab[1] else -1.0
ret = np.zeros(shape=[L,L,1])
coord_x, coord_y = coord
coord_x_idx = np.argmax(coord_x)
coord_y_idx = np.argmax(coord_y)
ret[coord_x_idx][coord_y_idx][0] = color
draw(ret, name)
def draw(m, name, extra=None):
FIG.clf()
matrix = m
orig_shape = np.shape(matrix)
# lose the channel shape in the end of orig_shape
new_shape = orig_shape[:-1]
matrix = np.reshape(matrix, new_shape)
ax = FIG.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
# plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
if extra != None:
greens, reds = extra
grn_x, grn_y, = greens
red_x, red_y = reds
plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
plt.scatter(x=red_x, y=red_y, c='r', s=40)
# # put a blue dot at (10, 20)
# plt.scatter([10], [20])
# # put a red dot, size 40, at 2 locations:
# plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
# # plt.plot()
plt.savefig(name)
def draw_orig(img, name):
ret = np.reshape(img, [L,L,1])
draw(ret, name)
def draw_allob(img, name, ob_prefix):
ret = np.zeros([L,L,1])
for ii in range(L):
for jj in range(L):
labb = img[ii][jj][0] - img[ii][jj][1]
ret[ii][jj][0] = labb
grn_x = []
grn_y = []
red_x = []
red_y = []
for obob in ob_prefix:
ob_c, labb = obob
if labb[0] > labb[1]:
grn_x.append(ob_c[0])
grn_y.append(ob_c[1])
else:
red_x.append(ob_c[0])
red_y.append(ob_c[1])
draw(ret, name, ((grn_y, grn_x), (red_y, red_x)))
def draw_obs(obs, name):
ret_shape = [L, L, 1]
ret = np.zeros(shape=ret_shape)
for ob, lab in obs:
ii, jj = ob
labb = 1.0 if lab[0] > lab[1] else -1.0
# labb = lab[0]
ret[ii][jj][0] = labb
draw(ret, name)
def draw_annotate(x_cords, y_cords, anns, name):
FIG.clf()
y = x_cords
z = y_cords
n = anns
fig = FIG
ax = fig.add_subplot(1,1,1)
ax.set_xlim([0,L])
ax.set_ylim([0,L])
ax.set_ylim(ax.get_ylim()[::-1])
ax.scatter(z, y)
for i, txt in enumerate(n):
ax.annotate(txt, (z[i],y[i]))
fig.savefig(name)
def draw_obs_trace(obs, name):
x_coords = []
y_coords = []
anno = []
for i, ob in enumerate(obs):
ob_coord, ob_outcome = ob
x_coords.append(ob_coord[0])
y_coords.append(ob_coord[1])
anno.append("O"+str(i)+str(int(ob_outcome[0])))
draw_annotate(x_coords, y_coords, anno, name)
def draw_all_preds(all_preds, name):
ret_shape = [L, L, 1]
ret = np.zeros(shape=ret_shape)
for qq, labb in all_preds:
i, j = qq
# ret[i][j][0] = 1.0 if labb[0] > labb[1] else 0.0
# ret[i][j][0] = labb[0]
ret[i][j][0] = labb[0]
draw(ret, name)
| mit |
droter/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be useable as a stand-alone library outide of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price,entryBar,upper=None, lower=None, timeout=None):
'''
trade a bracket on price series, return price delta and exit bar #
Input
------
price : numpy array of price values
entryBar: entry bar number, *determines entry price*
upper : high stop
lower : low stop
timeout : max number of periods to hold
Returns exit price and number of bars held
'''
assert isinstance(price, np.ndarray) , 'price must be a numpy array'
# create list of exit indices and add max trade duration. Exits are relative to entry bar
if timeout: # set trade length to timeout or series length
exits = [min(timeout,len(price)-entryBar-1)]
else:
exits = [len(price)-entryBar-1]
p = price[entryBar:entryBar+exits[0]+1] # subseries of price
# extend exits list with conditional exits
# check upper bracket
if upper:
assert upper>p[0] , 'Upper bracket must be higher than entry price '
idx = np.where(p>upper)[0] # find where price is higher than the upper bracket
if idx.any():
exits.append(idx[0]) # append first occurence
# same for lower bracket
if lower:
assert lower<p[0] , 'Lower bracket must be lower than entry price '
idx = np.where(p<lower)[0]
if idx.any():
exits.append(idx[0])
exitBar = min(exits) # choose first exit
return p[exitBar], exitBar
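# Illustrative sketch of tradeBracket (the numbers below are made up, not part of the module):
#
#   import numpy as np
#   prices = np.array([100., 101., 103., 99., 102., 105.])
#   exitPrice, barsHeld = tradeBracket(prices, entryBar=1, upper=104., lower=98., timeout=10)
#   # entry price is 101; the upper bracket is first exceeded at 105,
#   # so exitPrice == 105. and barsHeld == 4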
class Backtest(object):
"""
Backtest class, simple vectorized one. Works with pandas objects.
"""
def __init__(self,price, signal, signalType='capital',initialCash = 0, roundShares=True):
"""
Arguments:
*price* Series with instrument price.
*signal* Series with capital to invest (long+,short-) or number of shares.
        *signalType* 'capital' (amount of capital to bet) or 'shares'; 'capital' is the default.
*initialCash* starting cash.
*roundShares* round off number of shares to integers
"""
#TODO: add auto rebalancing
# check for correct input
assert signalType in ['capital','shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
#save internal settings to a dict
self.settings = {'signalType':signalType}
# first thing to do is to clean up the signal, removing nans and duplicate entries or exits
self.signal = signal.ffill().fillna(0)
# now find dates with a trade
tradeIdx = self.signal.diff().fillna(0) !=0 # days with trades are set to True
if signalType == 'shares':
self.trades = self.signal[tradeIdx] # selected rows where tradeDir changes value. trades are in Shares
elif signalType =='capital':
self.trades = (self.signal[tradeIdx]/price[tradeIdx])
if roundShares:
self.trades = self.trades.round()
# now create internal data structure
self.data = pd.DataFrame(index=price.index , columns = ['price','shares','value','cash','pnl'])
self.data['price'] = price
self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
self.data['value'] = self.data['shares'] * self.data['price']
delta = self.data['shares'].diff() # shares bought sold
self.data['cash'] = (-delta*self.data['price']).fillna(0).cumsum()+initialCash
self.data['pnl'] = self.data['cash']+self.data['value']-initialCash
@property
def sharpe(self):
''' return annualized sharpe ratio of the pnl '''
pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares']!=0] # use only days with position.
return sharpe(pnl) # need the diff here as sharpe works on daily returns.
@property
def pnl(self):
'''easy access to pnl data column '''
return self.data['pnl']
def plotTrades(self):
"""
visualise trades on the price chart
long entry : green triangle up
short entry : red triangle down
exit : black circle
"""
l = ['price']
p = self.data['price']
p.plot(style='x-')
# ---plot markers
# this works, but I rather prefer colored markers for each day of position rather than entry-exit signals
# indices = {'g^': self.trades[self.trades > 0].index ,
# 'ko':self.trades[self.trades == 0].index,
# 'rv':self.trades[self.trades < 0].index}
#
#
# for style, idx in indices.iteritems():
# if len(idx) > 0:
# p[idx].plot(style=style)
# --- plot trades
#colored line for long positions
idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
if idx.any():
p[idx].plot(style='go')
l.append('long')
#colored line for short positions
idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
if idx.any():
p[idx].plot(style='ro')
l.append('short')
plt.xlim([p.index[0],p.index[-1]]) # show full axis
plt.legend(l,loc='best')
plt.title('trades')
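# Minimal usage sketch for Backtest; the series below are illustrative only and
# assume the default 'capital' signal mode:
#
#   import pandas as pd
#   price = pd.Series([10., 11., 12., 11., 13.])
#   signal = pd.Series([0., 100., 100., 100., 0.]) # invest 100 on bar 1, go flat on bar 4
#   bt = Backtest(price, signal, signalType='capital')
#   # 100/11 rounds to 9 shares bought at 11 and sold at 13 -> final pnl of 18
#   print bt.data['pnl'].iloc[-1]
#   bt.plotTrades()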
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print '\r',self,
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
| bsd-3-clause |
Upward-Spiral-Science/claritycontrol | code/scripts/roi_analysis.py | 1 | 2744 | #!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
from __builtin__ import *
import gc
import numpy as np
from skimage.feature import greycomatrix, greycoprops
import matplotlib as mpl
mpl.use('TkAgg') # Solve runtime issue
import matplotlib.pyplot as plt
## Fake image and label volumes to quickly test functionality
def loadImg():
return np.random.random_sample((100,100,100))
def loadAtlas():
atlas_volume = np.zeros((100,100,100),dtype=np.uint32)
atlas_volume[10:50,10:50,10:50]=np.ones((40,40,40),dtype=np.uint32)*1
atlas_volume[50:90,10:50,10:50]=np.ones((40,40,40),dtype=np.uint32)*2
atlas_volume[10:50,50:90,10:50]=np.ones((40,40,40),dtype=np.uint32)*3
atlas_volume[50:90,50:90,10:50]=np.ones((40,40,40),dtype=np.uint32)*4
atlas_volume[10:50,10:50,50:90]=np.ones((40,40,40),dtype=np.uint32)*5
atlas_volume[50:90,10:50,50:90]=np.ones((40,40,40),dtype=np.uint32)*6
atlas_volume[10:50,50:90,50:90]=np.ones((40,40,40),dtype=np.uint32)*7
atlas_volume[50:90,50:90,50:90]=np.ones((40,40,40),dtype=np.uint32)*8
return atlas_volume
## END
## True data
# path = "~/Workspaces/claritycontrol/code/data/raw/"
# token = "Fear199"
# pathname = path+token+".img"
#
# img_volume = nib.load(pathname).get_data()[:,:,:,0]
## END
## get atlas values
atlas_volume = loadAtlas()
print atlas_volume.shape
atlas_values, atlas_count = np.unique(atlas_volume,return_counts=True)
atlas_values = atlas_values[1:] # remove background
## get img
img_volume = loadImg()
print img_volume.shape
class_id = 0 # Fear, Control, Cocaine
subject_id = 199
## normalize volume Z-standardization
img_volume = (img_volume-np.mean(img_volume))/np.std(img_volume)
## prepare results matrix
columns = ['class_id', 'subject_id', 'roi', 'mean', 'std', 'energy', 'entropy', 'correlation', 'contrast', 'variance', 'sumMean',
'inertial', 'clusterShade', 'clusterTendency', 'homogeneity', 'maxProbability', 'inverseVariance']
features = np.zeros((len(atlas_values), len(columns)), dtype=np.float32)
## compute GLCM and properties
for roi_id in range(len(atlas_values)):
features[roi_id, 0] = class_id
features[roi_id, 1] = subject_id
features[roi_id, 2] = atlas_values[roi_id]
## mask img and get roi block
mask_volume = (atlas_volume == atlas_values[roi_id])
xs, ys, zs = mask_volume.nonzero()
roi_block = np.multiply(img_volume, mask_volume)[min(xs):max(xs), min(ys):max(ys), min(zs):max(zs)]
del mask_volume # memory collect
## compute mean and std
features[roi_id, 3] = np.mean(roi_block[roi_block != 0])
features[roi_id, 4] = np.std(roi_block[roi_block != 0])
## compute GLCM and properties
# features[roi_id, 5] = 0
# features[roi_id, 6] = 0
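## Illustrative sketch only -- one way the commented-out GLCM features above
## could be computed for a single ROI. The 8-bit quantization, the 1-voxel
## distance and the 0-degree angle are assumptions, not values from the
## original pipeline.
def glcm_props_for_block(block, distances=(1,), angles=(0,), levels=256):
    """Grey-level co-occurrence properties of the central z-slice of a 3D block."""
    mid = block[:, :, block.shape[2] // 2]            # central slice
    lo, hi = mid.min(), mid.max()
    scaled = (mid - lo) / (hi - lo + 1e-12)           # rescale to [0, 1)
    quantized = (scaled * (levels - 1)).astype(np.uint8)
    glcm = greycomatrix(quantized, distances, angles,
                        levels=levels, symmetric=True, normed=True)
    return dict((prop, greycoprops(glcm, prop).mean())
                for prop in ('energy', 'correlation', 'contrast', 'homogeneity'))
# Example: glcm_props_for_block(roi_block) would fill columns such as 'energy'
# and 'contrast' for the last ROI processed in the loop above.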
| apache-2.0 |
kostajaitachi/shogun | examples/undocumented/python_modular/graphical/regression_lars.py | 26 | 3327 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RegressionLabels, RealFeatures
from modshogun import LeastAngleRegression, LinearRidgeRegression, LeastSquaresRegression
from modshogun import MeanSquaredError
# we compare LASSO with ordinary least squares (OLS)
# in the ideal case, the MSE of OLS should coincide
# with LASSO at the end of the path
#
# if OLS is unstable, we may use RidgeRegression (with
# a small regularization coefficient) to simulate OLS
use_ridge = False
np.random.seed(1024)
n = 200
ntrain = 100
p = 7
correlation = 0.6
mean = np.zeros(p)
cov = correlation*np.ones((p,p)) + (1-correlation)*np.eye(p)
Xall = np.random.multivariate_normal(mean, cov, n)
# model is the linear combination of the first three variables plus noise
yall = 2*Xall[:,0] + 5*Xall[:,1] + -3*Xall[:,2] + 0.5*np.random.randn(n)
X = Xall[0:ntrain,:]
y = yall[0:ntrain]
Xtest = Xall[ntrain:,:]
ytest = yall[ntrain:]
# preprocess data
for i in xrange(p):
X[:,i] -= np.mean(X[:,i])
X[:,i] /= np.linalg.norm(X[:,i])
y -= np.mean(y)
# train LASSO
LeastAngleRegression = LeastAngleRegression()
LeastAngleRegression.set_labels(RegressionLabels(y))
LeastAngleRegression.train(RealFeatures(X.T))
# train ordinary LSR
if use_ridge:
    lsr = LinearRidgeRegression(0.01, RealFeatures(X.T), RegressionLabels(y))
lsr.train()
else:
lsr = LeastSquaresRegression()
lsr.set_labels(RegressionLabels(y))
lsr.train(RealFeatures(X.T))
# gather LASSO path
path = np.zeros((p, LeastAngleRegression.get_path_size()))
for i in xrange(path.shape[1]):
path[:,i] = LeastAngleRegression.get_w(i)
evaluator = MeanSquaredError()
# apply on training data
mse_train = np.zeros(LeastAngleRegression.get_path_size())
for i in xrange(mse_train.shape[0]):
LeastAngleRegression.switch_w(i)
ypred = LeastAngleRegression.apply(RealFeatures(X.T))
mse_train[i] = evaluator.evaluate(ypred, RegressionLabels(y))
ypred = lsr.apply(RealFeatures(X.T))
mse_train_lsr = evaluator.evaluate(ypred, RegressionLabels(y))
# apply on test data
mse_test = np.zeros(LeastAngleRegression.get_path_size())
for i in xrange(mse_test.shape[0]):
LeastAngleRegression.switch_w(i)
ypred = LeastAngleRegression.apply(RealFeatures(Xtest.T))
    mse_test[i] = evaluator.evaluate(ypred, RegressionLabels(ytest))
ypred = lsr.apply(RealFeatures(Xtest.T))
mse_test_lsr = evaluator.evaluate(ypred, RegressionLabels(ytest))
fig = plt.figure()
ax_path = fig.add_subplot(1,2,1)
plt.plot(xrange(path.shape[1]), path.T, '.-')
plt.legend(['%d' % (x+1) for x in xrange(path.shape[0])])
plt.xlabel('iteration')
plt.title('LASSO path')
ax_tr = fig.add_subplot(2,2,2)
plt.plot(range(mse_train.shape[0])[1:], mse_train[1:], 'k.-')
plt.plot(range(mse_train.shape[0])[1:], np.zeros(mse_train.shape[0])[1:] + mse_train_lsr, 'r-')
plt.legend(('LASSO', 'LeastSquares'))
plt.xlabel('# of non-zero variables')
plt.ylabel('MSE')
plt.title('MSE on training data')
ax_tt = fig.add_subplot(2,2,4)
plt.plot(range(mse_test.shape[0])[1:], mse_test[1:], 'k.-')
plt.plot(range(mse_test.shape[0])[1:], np.zeros(mse_test.shape[0])[1:] + mse_test_lsr, 'r-')
plt.legend(('LASSO', 'LeastSquares'), loc='lower right')
plt.xlabel('# of non-zero variables')
plt.ylabel('MSE')
plt.title('MSE on test data')
plt.show()
| gpl-3.0 |
kmiernik/Pyspectr | bin/spectrum_fitter.py | 1 | 6663 | #!/usr/bin/env python3
"""
K. Miernik 2013
k.a.miernik@gmail.com
GPL v3
Spectrum fitting code
"""
import argparse
import math
import numpy
import os
import sys
import time
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_errors
from Pyspectr.hisfile import HisFile as HisFile
from Pyspectr.peak_fitter import PeakFitter as PeakFitter
from Pyspectr.exceptions import GeneralError as GeneralError
class SpectrumParser:
def __init__(self, file_name):
self.base_name, ext = os.path.splitext(file_name)
if len(ext) > 0 and ext in (".gz", ".his", ".tgz"):
self.file_type = 'his'
self.data_file = HisFile(file_name)
elif len(ext) > 0 and ext in ".txt":
self.file_type = 'txt'
self.data_file = numpy.loadtxt(file_name)
else:
raise GeneralError(
'Files other than txt, his, tgz and gz are not supported')
def parse(self, spectrum, show, pause):
spectra_ids = spectrum.get('id')
id_list = []
if self.file_type == 'his':
for element in spectra_ids.split(','):
element = element.split('-')
                if len(element) > 1:
                    for i in range(int(element[0]), int(element[1]) + 1):
                        id_list.append(i)
else:
id_list.append(int(element[0]))
elif self.file_type == 'txt':
if spectra_ids != '':
raise GeneralError('Spectrum id not supported for txt files')
else:
id_list.append('')
peaks = spectrum.findall('peak')
x_min = int(spectrum.get('min'))
x_max = int(spectrum.get('max'))
smin = spectrum.get('smin')
smax = spectrum.get('smax')
for spectrum_id in id_list:
plot_name = '{}_{}'.format(self.base_name, spectrum_id)
PF = PeakFitter(peaks, spectrum.get('baseline'), plot_name)
if self.file_type == 'txt':
data_x = self.data_file[x_min:x_max, 0]
data_y = self.data_file[x_min:x_max, 1]
if self.data_file.shape[1] == 2:
data_dy = []
for y in data_y:
dy = numpy.sqrt(y) if y > 0 else 1.0
data_dy.append(dy)
data_dy = numpy.array(data_dy)
else:
data_dy = self.data_file[x_min:x_max, 2]
for iy, y in enumerate(data_dy):
if y <= 0:
data_dy[iy] = 1.0
elif self.file_type == 'his':
data = self.data_file.load_histogram(spectrum_id)
if data[0] != 1:
raise GeneralError('Only 1D histograms are supported')
data_x = data[1][x_min:x_max]
data_y = data[3][x_min:x_max]
data_dy = []
for y in data_y:
dy = numpy.sqrt(y) if y > 0 else 1.0
data_dy.append(dy)
data_dy = numpy.array(data_dy)
if smin is not None and smax is not None:
width = [float(smin), float(smax)]
else:
width = None
fit_result = PF.fit(data_x, data_y, data_dy, width=width)
if show == 'plot' or show == 'svg':
plt.clf()
plt.xlabel('Channel')
plt.ylabel('Counts')
plt.plot(data_x, data_y, linestyle='steps-mid')
plt.plot(data_x, fit_result['baseline'], linestyle='--')
plt.plot(fit_result['x_axis'], fit_result['fit'], linewidth=1.0)
if show == 'svg':
svg_name = 'fit_{0}_{1}_{2}'.format(plot_name,
int(data_x[0]), int(data_x[-1]))
svg_name = svg_name.replace('.', '').\
replace('/', '') + '.svg'
plt.savefig(svg_name)
else:
plt.show()
plt.draw()
time.sleep(pause)
elif show == 'quiet':
pass
for i, peak in enumerate(peaks):
if peak.get('ignore') == 'True':
continue
x0 = PF.params['x{}'.format(i)].value
dx = PF.params['x{}'.format(i)].stderr
A = PF.params['A{}'.format(i)].value
dA = PF.params['A{}'.format(i)].stderr
s = PF.params['s{}'.format(i)].value
E = peaks[i].get('E')
name = peaks[i].get('name')
if name is None:
name = ""
Area = PF.find_area(data_x, i)
print('{:>8} {:>8} {:>8.2f} {:>8.2f}'.\
format(name, E, x0, dx),
'{:>8.1f} {:>8.1f} {:>8.3f} {:>8.1f}'.\
format(A, dA, s, Area))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('config', nargs=1,
help='Config files')
parser.add_argument('--pause', '-p', nargs=1, type=float, default=[0.5],
help='Pause time in seconds')
out_group = parser.add_mutually_exclusive_group()
out_group.add_argument('--plot', action='store_true',
help='Plot window during fitting')
out_group.add_argument('--svg', action='store_true',
help='SVG files saved during fitting')
out_group.add_argument('--quiet', action='store_true',
help='No output during fitting')
args = parser.parse_args()
show = 'plot'
if args.svg:
show = 'svg'
elif args.quiet:
show = 'quiet'
try:
tree = ET.parse(args.config[0])
    except ET.ParseError as err:  # ET.ParseError also wraps the underlying expat errors
print("File '{0}' parsing error: {1}".format(
args.config[0], err))
exit()
root = tree.getroot()
for data_file in root.findall('data_file'):
SP = SpectrumParser(data_file.get('name'))
print('# File: ', data_file.get('name'))
print('# {: ^8} {:^7} {:^8} {:^8} {:^8} {:^8} {:^8} {:^8}'
.format('Name', 'E', 'x0', 'dx', 'A', 'dA', 's', 'Area'))
for spectrum in data_file.findall('spectrum'):
SP.parse(spectrum, show, args.pause[0])
| gpl-3.0 |
SeldonIO/seldon-server | python/seldon/pipeline/sklearn_transform.py | 2 | 1944 | from collections import defaultdict
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator,TransformerMixin
from seldon.util import DeprecationHelper
class SklearnTransform(BaseEstimator,TransformerMixin):
"""
Allow sklearn transformers to be run on Pandas dataframes.
Parameters
----------
    input_features : list str
        input columns to use
    output_features : list str, optional
        names of output columns
    output_features_prefix : str, optional
        prefix used to build output column names; takes precedence over output_features
    transformer : scikit learn Transformer
        transformer to run on data
"""
def __init__(self,input_features=None,output_features=None,output_features_prefix=None,transformer=None):
self.input_features=input_features
self.output_features=output_features
self.transformer=transformer
self.output_features_prefix=output_features_prefix
def fit(self,df):
"""
Parameters
----------
df : pandas dataframe
Returns
-------
self: object
"""
self.transformer.fit(df[self.input_features].values)
return self
def transform(self,df):
"""
        Transform the input columns and merge the result into the input dataframe, using the given column names if provided
Parameters
----------
df : pandas dataframe
Returns
-------
Transformed pandas dataframe
"""
Y = self.transformer.transform(df[self.input_features].values)
df_Y = pd.DataFrame(Y)
if not self.output_features_prefix is None:
cols = [self.output_features_prefix+"_"+str(c) for c in df_Y.columns]
df_Y.columns = cols
elif not self.output_features is None and len(df_Y.columns) == len(self.output_features):
df_Y.columns = self.output_features
df_2 = pd.concat([df,df_Y],axis=1)
return df_2
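# Illustrative usage sketch (not from the Seldon docs; the column names and the
# choice of StandardScaler below are assumptions for demonstration only): scale
# two numeric DataFrame columns and append the results as prefixed columns.
if __name__ == '__main__':
    from sklearn.preprocessing import StandardScaler
    df_demo = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
    demo = SklearnTransform(input_features=['a', 'b'],
                            output_features_prefix='scaled',
                            transformer=StandardScaler())
    print(demo.fit(df_demo).transform(df_demo))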
sklearn_transform = DeprecationHelper(SklearnTransform) | apache-2.0 |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/minigo/implementations/tensorflow/minigo/oneoffs/embeddings_graphs.py | 8 | 3394 | #!/usr/bin/env python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import subprocess
import time
from absl import app, flags
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from tqdm import tqdm
flags.DEFINE_string('embedding_file', None, 'Where to save the embeddings.')
flags.DEFINE_integer(
'pca_dims', None,
help='None to skip PCA, else number of dimensions to reduce to')
flags.DEFINE_bool(
'produce_pngs', False,
help='if true call sgftopng for all positions')
flags.mark_flag_as_required('embedding_file')
flags.register_validator(
'embedding_file',
lambda ef: ef.endswith('.pickle') and os.path.isfile(ef),
'embedding_file must be an existing .pickle file')
FLAGS = flags.FLAGS
def main(argv):
t0 = time.time()
embedding_file = FLAGS.embedding_file
with open(embedding_file, 'rb') as pickle_file:
metadata, embeddings = pickle.load(pickle_file)
t1 = time.time()
reduced = embeddings
if FLAGS.pca_dims:
pca = PCA(n_components=FLAGS.pca_dims)
pca_result = pca.fit_transform(embeddings)
print('Explained variation per principal component:')
print(pca.explained_variance_ratio_)
print('Total Explained: {:.4f}'.format(
sum(pca.explained_variance_ratio_)))
print()
reduced = pca_result
t2 = time.time()
print('Shape:', reduced.shape)
tsne = TSNE(
n_components=2,
verbose=4,
perplexity=40,
n_iter=2000,
min_grad_norm=5e-5)
coords = tsne.fit_transform(reduced)
    assert len(coords.shape) == 2 and coords.shape[1] == 2, coords.shape
# scale coords to be [0,1] in both dims
coords -= [min(coords[:, 0]), min(coords[:, 1])]
coords /= max(coords.flatten())
t3 = time.time()
for i, (path, move) in enumerate(tqdm(metadata)):
assert path.endswith('.sgf'), path
png = '{}_{}.png'.format(path[:-4], move)
assert '/eval/' in png, png
png = png.replace('/eval/', '/thumbnails/')
if FLAGS.produce_pngs and not os.path.exists(png):
# NOTE: sgftopng is a pain to install, sorry.
with open(path) as sgf_file:
subprocess.run(
['sgftopng', png, '-' + str(move + 1)],
stdin=sgf_file)
metadata[i] = (path, move, png)
t4 = time.time()
print('Read {:.2f}s, PCA {:.2f}s t-SNE {:.2f}s, PNGs {:.2f}s'.format(
t1 - t0, t2 - t1, t3 - t2, t4 - t3))
new_file = embedding_file.replace('.pickle', '.graph.pickle')
assert new_file != embedding_file, (new_file, embedding_file)
with open(new_file, 'wb') as pickle_file:
pickle.dump([metadata, embeddings, coords], pickle_file)
print('TSNE cords added to', new_file)
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
erp12/pyshgp | pyshgp/gp/evaluation.py | 1 | 7737 | """The :mod:`evaluation` module defines classes to evaluate program CodeBlocks."""
from abc import ABC, abstractmethod
from typing import Sequence, Union, Callable
from collections import defaultdict
import numpy as np
import pandas as pd
from pyshgp.push.interpreter import PushInterpreter, Program
from pyshgp.tap import tap
from pyshgp.utils import Token
def damerau_levenshtein_distance(a: Union[str, Sequence], b: Union[str, Sequence]) -> int:
"""Damerau-Levenshtein Distance that works for both strings and lists.
https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance.
This implementation is heavily inspired by the implementation in the
jellyfish package. https://github.com/jamesturk/jellyfish
"""
a_is_str = isinstance(a, str)
b_is_str = isinstance(b, str)
if a_is_str or b_is_str:
assert a_is_str and b_is_str
len1 = len(a)
len2 = len(b)
infinite = len1 + len2
da = defaultdict(int)
score = [[0] * (len2 + 2) for x in range(len1 + 2)]
score[0][0] = infinite
for i in range(0, len1 + 1):
score[i + 1][0] = infinite
score[i + 1][1] = i
for i in range(0, len2 + 1):
score[0][i + 1] = infinite
score[1][i + 1] = i
for i in range(1, len1 + 1):
db = 0
for j in range(1, len2 + 1):
i1 = da[b[j - 1]]
j1 = db
cost = 1
if a[i - 1] == b[j - 1]:
cost = 0
db = j
score[i + 1][j + 1] = min(score[i][j] + cost,
score[i + 1][j] + 1,
score[i][j + 1] + 1,
score[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))
da[a[i - 1]] = i
return score[len1 + 1][len2 + 1]
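# A few illustrative values (not from the pyshgp test suite):
#   damerau_levenshtein_distance("kitten", "sitting")        -> 3
#   damerau_levenshtein_distance("abcd", "abdc")             -> 1  (one transposition)
#   damerau_levenshtein_distance([1, 2, 3, 4], [1, 2, 4, 3]) -> 1  (works on lists too)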
class Evaluator(ABC):
"""Base class or evaluators.
Parameters
----------
interpreter : PushInterpreter, optional
PushInterpreter used to run program and get their output. Default is
an interpreter with the default configuration and all core instructions
registered.
    penalty : float, optional
        When a program's output cannot be evaluated on a particular case, the
        penalty error is assigned. Default is 1e6.
    """
def __init__(self,
interpreter: PushInterpreter = "default",
penalty: float = 1e6):
self.penalty = penalty
if interpreter == "default":
self.interpreter = PushInterpreter()
else:
self.interpreter = interpreter
def default_error_function(self, actuals, expecteds) -> np.array:
"""Produce errors of actual program output given expected program output.
The default error function is intended to be a universal error function
for Push programs which only output a subset of the standard data types.
Parameters
----------
actuals : list
The values produced by running a Push program on a sequences of cases.
expecteds: list
The ground truth values for the sequence of cases used to produce the actuals.
Returns
-------
np.array
An array of error values describing the program's performance.
"""
errors = []
for ndx, actual in enumerate(actuals):
expected = expecteds[ndx]
if actual is Token.no_stack_item:
errors.append(self.penalty)
elif isinstance(expected, (bool, np.bool_)):
errors.append(int(not (bool(actual) == expected)))
elif isinstance(expected, (int, np.int64, float, np.float64)):
try:
errors.append(abs(float(actual) - expected))
except OverflowError:
errors.append(self.penalty)
elif isinstance(expected, str):
errors.append(damerau_levenshtein_distance(str(actual), expected))
elif isinstance(expected, list):
errors += list(self.default_error_function(list(actual), expected))
else:
raise ValueError("Unknown expected type for {e}".format(e=expected))
return np.array(errors)
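    # Worked example (illustrative): actuals [True, 3, "abc"] scored against
    # expecteds [False, 5, "abd"] gives the error vector [1, 2.0, 1] -- a wrong
    # boolean, an absolute numeric difference, and one string edit.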
@tap
@abstractmethod
def evaluate(self, program: Program) -> np.ndarray:
"""Evaluate the program and return the error vector.
Parameters
----------
program
Program (CodeBlock of Push code) to evaluate.
Returns
-------
np.ndarray
The error vector of the program.
"""
pass
class DatasetEvaluator(Evaluator):
"""Evaluator driven by a labeled dataset."""
def __init__(self,
X, y,
interpreter: PushInterpreter = "default",
penalty: float = 1e6):
"""Create Evaluator based on a labeled dataset. Inspired by sklearn.
Parameters
----------
X : list, array-like, or pandas dataframe of shape = [n_samples, n_features]
The inputs to evaluate each program on.
y : list, array-like, or pandas dataframe.
The target values. Shape = [n_samples] or [n_samples, n_outputs]
interpreter : PushInterpreter or {"default"}
The interpreter used to run the push programs.
penalty : float
If no response is given by the program on a given input, assign this
error as the error.
"""
super().__init__(interpreter, penalty)
self.X = pd.DataFrame(X)
self.y = pd.DataFrame(y)
@tap
def evaluate(self, program: Program) -> np.array:
"""Evaluate the program and return the error vector.
Parameters
----------
program
Program (CodeBlock of Push code) to evaluate.
Returns
-------
np.ndarray
The error vector of the program.
"""
super().evaluate(program)
errors = []
for ndx in range(self.X.shape[0]):
inputs = self.X.iloc[ndx].to_list()
expected = self.y.iloc[ndx].to_list()
actual = self.interpreter.run(program, inputs)
errors.append(self.default_error_function(actual, expected))
return np.array(errors).flatten()
class FunctionEvaluator(Evaluator):
"""Evaluator driven by an error function."""
def __init__(self, error_function: Callable):
"""Create Evaluator driven by an error function.
The given error function must take a push program in the form of a
CodeBlock and then return an np.ndarray of numeric errors. These errors
will be used as the program's error vector.
The error functions will typically instantiate its own PushInterpreter
and run the given program as needed.
Parameters
----------
error_function : Callable
A function which takes a program to evaluate and returns a
np.ndarray of errors.
"""
super().__init__()
self.error_function = error_function
@tap
def evaluate(self, program: Program) -> np.ndarray:
"""Evaluate the program and return the error vector.
Parameters
----------
program
Program (CodeBlock of Push code) to evaluate.
Returns
-------
np.ndarray
The error vector of the program.
"""
super().evaluate(program)
return self.error_function(program)
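# Illustrative sketch (the error function below is made up; only the
# FunctionEvaluator class itself comes from this module):
#
#     def my_errors(program):
#         # run `program` with a PushInterpreter, compare outputs to targets...
#         return np.array([0.0, 1.0, 2.0])
#
#     evaluator = FunctionEvaluator(my_errors)
#     error_vector = evaluator.evaluate(program)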
| mit |
madjelan/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be
very expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propgation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
calatre/epidemics_network | treat/excel_clipper.py | 1 | 1352 | # Universidade de Aveiro - Physics Department
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 28/6/2017
# Selecting Data from an excel file to another
#import numpy as np
import pandas as pd
from openpyxl import load_workbook
#r = [0, 301, 302, 303, 304, 305, 306]
#desired = ['S_Avg', 'I_Avg', 'R_Avg', 'S_StD', 'I_StD', 'R_StD']
cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
book = load_workbook('data/ns_shift.xlsx')
writer = pd.ExcelWriter('data/nd_shift.xlsx', engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
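# Loading the existing workbook and pre-registering its sheets on the writer is
# the usual openpyxl recipe for appending new sheets without overwriting the
# sheets already present when writer.save() is called.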
for cvar in cvalues:
for rvar in rvalues:
print('retrieving...')
tblnm = 'c='+str(cvar)+'|r='+ str(rvar)
data = pd.read_excel('data/ns_shift.xlsx',
sheetname = tblnm, index_col = 0)
print('...retrieved')
#data.drop(data.columns[r], axis = 1, inplace= True)
sel = data[:1000]
print('copying...............................'+str(tblnm))
sel.to_excel(writer,'c='+str(cvar)+'|r='+ str(rvar))
print('copied!')
writer.save()
| apache-2.0 |
piskvorky/gensim | gensim/sklearn_api/atmodel.py | 4 | 10965 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.atmodel.AuthorTopicModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api.atmodel import AuthorTopicTransformer
>>>
>>> # Pass a mapping from authors to the documents they contributed to.
>>> author2doc = {
... 'john': [0, 1, 2, 3, 4, 5, 6],
... 'jane': [2, 3, 4, 5, 6, 7, 8],
... 'jack': [0, 2, 4, 6, 8]
... }
>>>
>>> # Lets use the model to discover 2 different topics.
>>> model = AuthorTopicTransformer(id2word=common_dictionary, author2doc=author2doc, num_topics=2, passes=100)
>>>
>>> # In which of those 2 topics does jack mostly contribute to?
>>> topic_dist = model.fit(common_corpus).transform('jack')
"""
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class AuthorTopicTransformer(TransformerMixin, BaseEstimator):
"""Base Author Topic module, wraps :class:`~gensim.models.atmodel.AuthorTopicModel`.
The model's internal workings are heavily based on `"The Author-Topic Model for Authors and Documents",
    Rosen-Zvi et al. 2004 <https://mimno.infosci.cornell.edu/info6150/readings/398.pdf>`_.
"""
def __init__(self, num_topics=100, id2word=None, author2doc=None, doc2author=None,
chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
gamma_threshold=0.001, serialized=False, serialization_path=None,
minimum_probability=0.01, random_state=None):
"""
Parameters
----------
num_topics : int, optional
Number of requested latent topics to be extracted from the training corpus.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Mapping from a words' ID to the word itself. Used to determine the vocabulary size, as well as for debugging
and topic printing.
author2doc : dict of (str, list of int), optional
Maps an authors name to a list of document IDs where has has contributed.
Either `author2doc` or `doc2author` **must be supplied**.
doc2author : dict of (int, list of str)
Maps a document (using its ID) to a list of author names that contributed to it.
Either `author2doc` or `doc2author` **must be supplied**.
chunksize : int, optional
Number of documents to be processed by the model in each mini-batch.
passes : int, optional
Number of times the model can make a pass over the corpus during training.
iterations : int, optional
Maximum number of times the model before convergence during the M step of the EM algorithm.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to Kappa from `"The Author-Topic Model for Authors
and Documents", Osen-Zvi et. al 2004 <https://mimno.infosci.cornell.edu/info6150/readings/398.pdf>`_.
offset : float, optional
Hyper-parameter that controls how much we will slow down the first steps the first few iterations.
            Corresponds to Tau_0 from `"The Author-Topic Model for Authors and Documents", Rosen-Zvi et al. 2004
<https://mimno.infosci.cornell.edu/info6150/readings/398.pdf>`_.
alpha : {np.ndarray, str}, optional
Can be set to an 1D array of length equal to the number of expected topics that expresses
our a-priori belief for the each topics' probability.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / topicno`.
* 'auto': Learns an asymmetric prior from the corpus.
eta : {float, np.array, str}, optional
A-priori belief on word probability, this can be:
* scalar for a symmetric prior over topic/word probability,
* vector of length num_words to denote an asymmetric user defined probability for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination,
* the string 'auto' to learn the asymmetric prior from the data.
update_every : int, optional
Number of mini-batches between each model update.
eval_every : int, optional
Number of updates between two log perplexity estimates.
Set to None to disable perplexity estimation.
gamma_threshold : float, optional
Minimum change in the value of the gamma parameters to continue iterating.
serialized : bool, optional
Indicates whether the input corpora to the model are simple in-memory lists (`serialized = False`)
or saved to the hard-drive (`serialized = True`). Note that this behaviour is quite different from
other Gensim models. If your data is too large to fit in to memory, use this functionality.
serialization_path : str, optional
Path to file that used for storing the serialized object, **must be supplied if `serialized = True`**.
An existing file *cannot* be overwritten, either delete the old file or choose a different name.
minimum_probability : float, optional
Topics with a probability lower than this threshold will be filtered out.
random_state : {np.random.RandomState, int}, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.author2doc = author2doc
self.doc2author = doc2author
self.chunksize = chunksize
self.passes = passes
self.iterations = iterations
self.decay = decay
self.offset = offset
self.alpha = alpha
self.eta = eta
self.update_every = update_every
self.eval_every = eval_every
self.gamma_threshold = gamma_threshold
self.serialized = serialized
self.serialization_path = serialization_path
self.minimum_probability = minimum_probability
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : iterable of list of (int, number)
Sequence of documents in BoW format.
Returns
-------
:class:`~gensim.sklearn_api.atmodel.AuthorTopicTransformer`
The trained model.
"""
self.gensim_model = models.AuthorTopicModel(
corpus=X, num_topics=self.num_topics, id2word=self.id2word,
author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,
update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold,
serialized=self.serialized, serialization_path=self.serialization_path,
minimum_probability=self.minimum_probability, random_state=self.random_state
)
return self
def transform(self, author_names):
"""Infer the topic probabilities for each author.
Parameters
----------
author_names : {iterable of str, str}
Author name or sequence of author names whose topics will be identified.
Returns
-------
numpy.ndarray
Topic distribution for each input author.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# The input as array of arrays
if not isinstance(author_names, list):
author_names = [author_names]
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
topics = [matutils.sparse2full(self.gensim_model[author_name], self.num_topics) for author_name in author_names]
return np.reshape(np.array(topics), (len(author_names), self.num_topics))
def partial_fit(self, X, author2doc=None, doc2author=None):
"""Train model over a potentially incomplete set of documents.
This method can be used in two ways:
* On an unfitted model in which case the model is initialized and trained on `X`.
* On an already fitted model in which case the model is **updated** by `X`.
Parameters
----------
X : iterable of list of (int, number)
Sequence of documents in BoW format.
author2doc : dict of (str, list of int), optional
Maps an authors name to a list of document IDs where has has contributed.
Either `author2doc` or `doc2author` **must be supplied**.
doc2author : dict of (int, list of str)
Maps a document (using its ID) to a list of author names that contributed to it.
Either `author2doc` or `doc2author` **must be supplied**.
Returns
-------
:class:`~gensim.sklearn_api.atmodel.AuthorTopicTransformer`
The trained model.
"""
if self.gensim_model is None:
self.gensim_model = models.AuthorTopicModel(
corpus=X, num_topics=self.num_topics, id2word=self.id2word,
author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,
update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold,
serialized=self.serialized, serialization_path=self.serialization_path,
minimum_probability=self.minimum_probability, random_state=self.random_state
)
self.gensim_model.update(corpus=X, author2doc=author2doc, doc2author=doc2author)
return self
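# Illustrative sketch (names such as `dictionary`, `first_corpus` and
# `extra_corpus` are placeholders, not gensim API): incremental training with
# partial_fit followed by author-topic inference.
#
#     model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=2)
#     model.partial_fit(first_corpus)
#     model.partial_fit(extra_corpus, author2doc={'jill': [0, 1]})
#     print(model.transform('jill'))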
| lgpl-2.1 |
wesleybowman/karsten | project/rawADCPclass.py | 1 | 4107 | from __future__ import division
import numpy as np
import sys
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv, ut_reconstr
#from shortest_element_path import shortest_element_path
#import matplotlib.pyplot as plt
#import matplotlib.tri as Tri
#import matplotlib.ticker as ticker
#import seaborn
import scipy.io as sio
import h5py
from os import path
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
class rawADCP:
def __init__(self, filename):
self.QC = ['raw data']
self.load(filename)
self.Params_Stn4_SWNSreport(filename)
self.load_rbrdata()
## set options
self.options = {}
self.options['showPA'] = 1
self.options['showRBRavg'] = 1
## save a flow file in BPformat
#save_FlowFile_BPFormat(fileinfo,adcp,rbr,saveparams,options)
def load(self, filename):
try:
self.mat = sio.loadmat(filename,
struct_as_record=False, squeeze_me=True)
self.adcp = self.mat['adcp']
except NotImplementedError:
self.mat = h5py.File(filename)
self.adcp = self.mat['adcp']
#self.adcp = Struct(**self.mat['adcp'])
def Params_Stn4_SWNSreport(self, filename):
fname = filename.split('/')
filebase = fname[-1].split('_')[0]
self.fileinfo = {}
self.fileinfo['datadir'] = path.join(*fname[:-1]) + '/'
self.fileinfo['ADCP'] = filebase + '_raw'
self.fileinfo['outdir'] = path.join(*fname[:-1]) + '/'
self.fileinfo['flowfile'] = filebase + '_Flow'
self.fileinfo['rbr']= 'station4_grandPassageII_RBRSN_011857.mat'
self.fileinfo['paramfile']= 'Params_Stn4_SWNSreport'
#%% ADCP parameters
self.saveparams = {}
self.saveparams['tmin'] = 209
self.saveparams['tmax'] = 240
self.saveparams['zmin'] = 0
self.saveparams['zmax'] = 20
self.saveparams['approxdepth'] = 15.5
self.saveparams['flooddir'] = 0
self.saveparams['declination'] = -17.25
self.saveparams['lat'] = 44.2605
self.saveparams['lon'] = -66.3354
self.saveparams['dabADCP'] = 0.5
self.saveparams['dabPS'] = -0.6
self.saveparams['rbr_hr_offset'] = 3
def load_rbrdata(self):
rbrFile = self.fileinfo['datadir'] + self.fileinfo['rbr']
try:
rbrMat = sio.loadmat(rbrFile,
struct_as_record=False, squeeze_me=True)
except NotImplementedError:
rbrMat = h5py.File(rbrFile)
rbr = rbrMat['rbr']
rbrout = {}
rbrout['mtime'] = rbr.yd
rbrout['temp'] = rbr.temperature
rbrout['pres'] = rbr.pressure
rbrout['depth'] = rbr.depth
rbrout['mtime'] = rbr.yd
self.rbr = rbrout
if __name__ == '__main__':
#filename = 'GP-120726-BPd_raw.mat'
filename = '140703-EcoEII_database/data/GP-120726-BPd_raw.mat'
data = rawADCP(filename)
#stn = 'GP-120726-BPd';
#%% File information
#fileinfo.datadir = '../data/'; %path to raw data files
#fileinfo.ADCP = [stn '_raw']; %name of ADCP file
#fileinfo.outdir = '../data/'; %path to output directory
#fileinfo.flowfile = [stn,'_Flow']; %name of output file with Flow data
#fileinfo.rbr = ['station4_grandPassageII_RBRSN_011857.mat'];
#fileinfo.paramfile = mfilename;
#
#%% ADCP parameters
#saveparams.tmin = 209; %tmin (year day)
#saveparams.tmax = 240; %tmax (year day)
#saveparams.zmin = 0; %minimum z to include in saves file
#saveparams.zmax = 20;
#saveparams.approxdepth = 15.5; %Approximate depth
#saveparams.flooddir= 0; %Flood direction (relative to true north, CW is positive)
#saveparams.declination = -17.25;%Declination angle
#saveparams.lat = 44.2605; %latitude
#saveparams.lon = -66.3354; %longitude
#saveparams.dabADCP = 0.5; %depth above bottom of ADCP
#saveparams.dabPS = -0.6; %depth above bottom of pressure sensor
#saveparams.rbr_hr_offset = 3; % hour offset to convert rbr time to UTC
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py | 5 | 1998 | """ test the label propagation module """
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| mit |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 1 | 31702 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
    return '#{0:02x}{1:02x}{2:02x}'.format(*tuple([int(round(val * 255)) for val in rgb]))
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "{0!s}"'.format(s))
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
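# Example (illustrative, not part of the original module): hex2color and
# rgb2hex are inverses of one another, up to rounding to 8 bits per channel.
#
#     >>> hex2color('#ff8000')      # -> (1.0, 0.502, 0.0), green is 128/255
#     >>> rgb2hex((1.0, 0.5, 0.0))  # -> '#ff8000'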
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "{0!s}" is unhashable even inside a tuple'.format(str(arg)))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is {0:d}; must be 3 or 4'.format(len(arg)))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "{0!s}"\n{1!s}'.format(str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "{0!s}"\n{1!s}'.format(str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
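# Example (illustrative): the module-level ``colorConverter`` accepts single
# letters, hex strings, standard names, grayscale strings and rgb/rgba tuples.
#
#     >>> colorConverter.to_rgb('r')                # -> (1.0, 0.0, 0.0)
#     >>> colorConverter.to_rgb('#00ff00')          # -> (0.0, 1.0, 0.0)
#     >>> colorConverter.to_rgb('0.5')              # gray level as a string
#     (0.5, 0.5, 0.5)
#     >>> colorConverter.to_rgba('aqua', alpha=0.3)
#     (0.0, 1.0, 1.0, 0.3)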
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x less
    than or equal to the given x, and y1 is the value to be used for x
    greater than the given x. The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertible to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
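# Example (illustrative): a single linear segment from 0 to 1 produces an
# evenly spaced ramp.
#
#     >>> makeMappingArray(5, [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)])
#     # -> array([0., 0.25, 0.5, 0.75, 1.])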
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
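# Example (illustrative): using the 'cdict' shown in the __init__ docstring
# above, the resulting colormap runs from black through red/orange to white.
#
#     >>> my_cmap = LinearSegmentedColormap('my_colormap', cdict, N=256)
#     >>> my_cmap(0.0)     # bottom of the range -> (0.0, 0.0, 0.0, 1.0)
#     >>> my_cmap(0.5)     # midpoint -> roughly (1.0, 0.5, 0.0, 1.0)
#     >>> my_cmap(1.0)     # top of the range -> (1.0, 1.0, 1.0, 1.0)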
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
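# Example (illustrative): a three-entry listed colormap.  Integer arguments
# index directly into the list; floats in the 0-1 range are scaled by N first.
#
#     >>> cmap = ListedColormap(['r', 'g', 'b'], name='rgb3')
#     >>> cmap(0)          # -> (1.0, 0.0, 0.0, 1.0)
#     >>> cmap(0.99)       # 0.99 * 3 -> index 2, i.e. (0.0, 0.0, 1.0, 1.0)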
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
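# Example (illustrative): mapping data into the unit interval.
#
#     >>> norm = Normalize(vmin=0.0, vmax=10.0)
#     >>> norm(5.0)             # -> 0.5
#     >>> norm([2.5, 7.5])      # -> masked array([0.25, 0.75])
#     >>> norm.inverse(0.5)     # -> 5.0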
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
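# Example (illustrative): logarithmic normalization maps equal ratios to equal
# increments, so with vmin=1 and vmax=100 the value 10 sits halfway.
#
#     >>> norm = LogNorm(vmin=1.0, vmax=100.0)
#     >>> norm(10.0)            # -> 0.5
#     >>> norm.inverse(0.5)     # -> 10.0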
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
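# Example (illustrative): with boundaries [0, 1, 2, 3] and ncolors=3, values
# are mapped to integer color indices; out-of-range values get -1 (low) or
# ncolors (high), which Colormap.__call__ converts to the under/over colors.
#
#     >>> bnorm = BoundaryNorm([0, 1, 2, 3], ncolors=3)
#     >>> bnorm(np.array([-0.5, 0.5, 1.5, 2.5, 3.5]))
#     # -> masked array([-1, 0, 1, 2, 3])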
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
agoose77/hivesystem | manual/movingpanda/panda-11d.py | 1 | 6687 | import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import dragonfly.time
import dragonfly.event
import dragonfly.sys
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
a = Spyder.AxisSystem()
a *= 0.005
mypanda = Spyder.Actor3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
entityname="mypanda")
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
camcenter = Spyder.Entity3D(
"camcenter",
(
Spyder.NewMaterial("white", color=(255, 255, 255)),
Spyder.Block3D((1, 1, 1), material="white"),
)
)
del a, box
class pandawalkhive(bee.inithive):
animation = dragonfly.scene.bound.animation()
walk = dragonfly.std.variable("str")("walk")
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
setPos = dragonfly.scene.bound.setPos()
setHpr = dragonfly.scene.bound.setHpr()
interval = dragonfly.time.interval_time(18)
connect(key_w, interval.start)
connect(key_s, interval.pause)
sequence = dragonfly.time.sequence(4)(8, 1, 8, 1)
connect(interval.value, sequence.inp)
ip1 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0))
connect(sequence.outp1, ip1)
connect(ip1, setPos)
connect(key_w, ip1.start)
connect(key_s, ip1.stop)
ip2 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (180, 0, 0))
connect(sequence.outp2, ip2)
connect(ip2, setHpr)
connect(key_w, ip2.start)
connect(key_s, ip2.stop)
ip3 = dragonfly.time.interpolation("Coordinate")((0, -10, 0), (0, 0, 0))
connect(sequence.outp3, ip3)
connect(ip3, setPos)
connect(key_w, ip3.start)
connect(key_s, ip3.stop)
ip4 = dragonfly.time.interpolation("Coordinate")((180, 0, 0), (0, 0, 0))
connect(sequence.outp4, ip4)
connect(ip4, setHpr)
connect(key_w, ip4.start)
connect(key_s, ip4.stop)
connect(ip4.reach_end, interval.start)
from bee.staticbind import staticbind_baseclass
class pandawalkbind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = pandawalkhive
class camerabindhive(bee.inithive):
interval = dragonfly.time.interval_time(30)
sequence = dragonfly.time.sequence(2)(1, 1)
connect(interval.value, sequence.inp)
startsensor = dragonfly.sys.startsensor()
ip1 = dragonfly.time.interpolation("Coordinate")((180, -20, 0), (360, -20, 0))
ip2 = dragonfly.time.interpolation("Coordinate")((0, -20, 0), (180, -20, 0))
connect(sequence.outp1, ip1.inp)
connect(sequence.outp2, ip2.inp)
connect(startsensor, interval.start)
connect(startsensor, ip1.start)
connect(ip1.reach_end, ip1.stop)
connect(ip1.reach_end, ip2.start)
connect(ip2.reach_end, ip2.stop)
connect(ip2.reach_end, ip1.start)
connect(ip2.reach_end, interval.start)
sethpr = dragonfly.scene.bound.setHpr()
connect(ip1, sethpr)
connect(ip2, sethpr)
class camerabind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
z_pandawalk = pandawalkbind().worker()
pandaid = dragonfly.std.variable("id")(pandaname_)
connect(pandaid, z_pandawalk.bindname)
camerabind = camerabind().worker()
camcenter = dragonfly.std.variable("id")("camcenter")
connect(camcenter, camerabind.bindname)
startsensor = dragonfly.sys.startsensor()
cam = dragonfly.scene.get_camera()
camparent = dragonfly.scene.unbound.parent()
connect(cam, camparent.entityname)
connect(camcenter, camparent.entityparentname)
connect(startsensor, camparent)
cphide = dragonfly.scene.unbound.hide()
connect(camcenter, cphide)
connect(startsensor, cphide)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, do_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
wininit = bee.init("window")
wininit.camera.setPos(0, 45, 25)
wininit.camera.setHpr(180, -20, 0)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
| bsd-2-clause |
smunaut/gnuradio | gr-filter/examples/fir_filter_ccc.py | 6 | 4023 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
harisbal/pandas | pandas/core/reshape/merge.py | 2 | 61308 | """
SQL-style merge routines
"""
import copy
import string
import warnings
import numpy as np
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
import pandas.compat as compat
from pandas.compat import filter, lzip, map, range, zip
from pandas.errors import MergeError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool,
is_bool_dtype, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetimelike, is_dtype_equal, is_float_dtype,
is_int64_dtype, is_int_or_datetime_dtype, is_integer, is_integer_dtype,
is_list_like, is_number, is_numeric_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import isnull, na_value_for_dtype
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timedelta
import pandas.core.algorithms as algos
from pandas.core.arrays.categorical import _recode_for_categories
import pandas.core.common as com
from pandas.core.frame import _merge_doc
from pandas.core.internals import (
concatenate_block_managers, items_overlap_with_suffix)
import pandas.core.sorting as sorting
from pandas.core.sorting import is_int64_overflow_possible
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator,
validate=validate)
return op.get_result()
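# Example (illustrative, not part of pandas itself): a plain inner join on a
# shared column keeps only the keys present in both frames.
#
#     >>> left = pd.DataFrame({'key': ['a', 'b'], 'lval': [1, 2]})
#     >>> right = pd.DataFrame({'key': ['b', 'c'], 'rval': [3, 4]})
#     >>> pd.merge(left, right, on='key', how='inner')
#       key  lval  rval
#     0   b     2     3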
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except KeyError:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def merge_ordered(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y'),
how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
group key lvalue rvalue
0 a a 1 NaN
1 a b 1 1.0
2 a c 2 2.0
3 a d 2 3.0
4 a e 3 3.0
5 b a 1 NaN
6 b b 1 1.0
7 b c 2 2.0
8 b d 2 3.0
9 b e 3 3.0
Returns
-------
merged : DataFrame
The output type will the be same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge
merge_asof
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
suffixes=suffixes, fill_method=fill_method,
how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
result, _ = _groupby_and_merge(left_by, on, left, right,
lambda x, y: _merger(x, y),
check_duplicates=False)
elif right_by is not None:
result, _ = _groupby_and_merge(right_by, on, right, left,
lambda x, y: _merger(y, x),
check_duplicates=False)
else:
result = _merger(left, right)
return result
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True,
direction='backward'):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
Examples
--------
>>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
... 'right_val': [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
... index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
See also
--------
merge
merge_ordered
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction)
return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
left = validate_operand(left)
right = validate_operand(right)
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com.maybe_make_list(on)
self.left_on = com.maybe_make_list(left_on)
self.right_on = com.maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError(
'indicator option can only accept boolean or string arguments')
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{left_index}'.format(left_index=type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{right_index}'.format(right_index=type(right_index)))
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({left} levels on the left, {right} on the right)'
).format(left=left.columns.nlevels,
right=right.columns.nlevels)
warnings.warn(msg, UserWarning)
self._validate_specification()
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
# validate the merge keys dtypes. We may need to coerce
# to avoid incompat dtypes
self._maybe_coerce_merge_keys()
# If argument passed to validate,
# check if columns specified as unique
# are in fact unique.
if validate is not None:
self._validate(validate)
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(
self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
self._maybe_restore_index_levels(result)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {name}"
.format(name=i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] +
result['_right_indicator']),
categories=[1, 2, 3])
result[self.indicator_name] = (
result[self.indicator_name]
.cat.rename_categories(['left_only', 'right_only', 'both']))
result = result.drop(labels=['_left_indicator', '_right_indicator'],
axis=1)
return result
def _maybe_restore_index_levels(self, result):
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(self.join_names,
self.left_on,
self.right_on):
if (self.orig_left._is_level_reference(left_key) and
self.orig_right._is_level_reference(right_key) and
name not in result.index.names):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.left[name].dtype):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.right[name].dtype):
take_right = self.right[name]._values
elif left_indexer is not None \
and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer,
fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer,
fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values
mask = left_indexer == -1
if mask.all():
key_col = rvals
else:
key_col = Index(lvals).where(~mask, rvals)
if result._is_label_reference(name):
result[name] = key_col
elif result._is_level_reference(name):
if isinstance(result.index, MultiIndex):
idx_list = [result.index.get_level_values(level_name)
if level_name != name else key_col
for level_name in result.index.names]
result.set_index(idx_list, inplace=True)
else:
result.index = Index(key_col, name=name)
else:
result.insert(i, name or 'key_{i}'.format(i=i), key_col)
def _get_join_indexers(self):
""" return the join indexers """
return _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort,
how=self.how)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True,
sort=self.sort)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: is_array_like(x) and len(x) == len(left)
is_rkey = lambda x: is_array_like(x) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(
right._get_label_or_level_values(rk))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(
right._get_label_or_level_values(rk))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left._get_label_or_level_values(lk))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left._drop_labels_or_levels(left_drop)
if right_drop:
self.right = self.right._drop_labels_or_levels(right_drop)
return left_keys, right_keys, join_names
def _maybe_coerce_merge_keys(self):
# we have valid mergees but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(self.left_join_keys,
self.right_join_keys,
self.join_names):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
lk_is_cat = is_categorical_dtype(lk)
rk_is_cat = is_categorical_dtype(rk)
# if either left or right is a categorical
# then the must match exactly in categories & ordered
if lk_is_cat and rk_is_cat:
if lk.is_dtype_equal(rk):
continue
elif lk_is_cat or rk_is_cat:
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
msg = ("You are trying to merge on {lk_dtype} and "
"{rk_dtype} columns. If you wish to proceed "
"you should use pd.concat".format(lk_dtype=lk.dtype,
rk_dtype=rk.dtype))
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8, int and float
# further if we are object, but we infer to
# the same, then proceed
if is_numeric_dtype(lk) and is_numeric_dtype(rk):
if lk.dtype.kind == rk.dtype.kind:
pass
# check whether ints and floats
elif is_integer_dtype(rk) and is_float_dtype(lk):
if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
warnings.warn('You are merging on int and float '
'columns where the float values '
'are not equal to their int '
'representation', UserWarning)
elif is_float_dtype(rk) and is_integer_dtype(lk):
if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
warnings.warn('You are merging on int and float '
'columns where the float values '
'are not equal to their int '
'representation', UserWarning)
# let's infer and see if we are ok
elif lib.infer_dtype(lk) == lib.infer_dtype(rk):
pass
# Check if we are trying to merge on obviously
# incompatible dtypes GH 9780, GH 15800
# boolean values are considered as numeric, but are still allowed
# to be merged on object boolean values
elif ((is_numeric_dtype(lk) and not is_bool_dtype(lk))
and not is_numeric_dtype(rk)):
raise ValueError(msg)
elif (not is_numeric_dtype(lk)
and (is_numeric_dtype(rk) and not is_bool_dtype(rk))):
raise ValueError(msg)
elif is_datetimelike(lk) and not is_datetimelike(rk):
raise ValueError(msg)
elif not is_datetimelike(lk) and is_datetimelike(rk):
raise ValueError(msg)
elif is_datetime64tz_dtype(lk) and not is_datetime64tz_dtype(rk):
raise ValueError(msg)
elif not is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
raise ValueError(msg)
# Houston, we have a problem!
# let's coerce to object if the dtypes aren't
# categorical, otherwise coerce to the category
# dtype. If we coerced categories to object,
# then we would lose type information on some
# columns, and end up trying to merge
# incompatible dtypes. See GH 16900.
else:
if name in self.left.columns:
typ = lk.categories.dtype if lk_is_cat else object
self.left = self.left.assign(
**{name: self.left[name].astype(typ)})
if name in self.right.columns:
typ = rk.categories.dtype if rk_is_cat else object
self.right = self.right.assign(
**{name: self.right[name].astype(typ)})
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError(
'No common columns to perform merge on. '
'Merge options: left_on={lon}, right_on={ron}, '
'left_index={lidx}, right_index={ridx}'
.format(lon=self.left_on, ron=self.right_on,
lidx=self.left_index, ridx=self.right_index))
if not common_cols.is_unique:
raise MergeError("Data columns not unique: {common!r}"
.format(common=common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _validate(self, validate):
# Check uniqueness of each
if self.left_index:
left_unique = self.orig_left.index.is_unique
else:
left_unique = MultiIndex.from_arrays(self.left_join_keys
).is_unique
if self.right_index:
right_unique = self.orig_right.index.is_unique
else:
right_unique = MultiIndex.from_arrays(self.right_join_keys
).is_unique
# Check data integrity
if validate in ["one_to_one", "1:1"]:
if not left_unique and not right_unique:
raise MergeError("Merge keys are not unique in either left"
" or right dataset; not a one-to-one merge")
elif not left_unique:
raise MergeError("Merge keys are not unique in left dataset;"
" not a one-to-one merge")
elif not right_unique:
raise MergeError("Merge keys are not unique in right dataset;"
" not a one-to-one merge")
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError("Merge keys are not unique in left dataset;"
" not a one-to-many merge")
elif validate in ["many_to_one", "m:1"]:
if not right_unique:
raise MergeError("Merge keys are not unique in right dataset;"
" not a many-to-one merge")
elif validate in ['many_to_many', 'm:m']:
pass
else:
raise ValueError("Not a valid argument for validate")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
**kwargs):
"""
Parameters
----------
left_keys: ndarray, Index, Series
right_keys: ndarray, Index, Series
sort: boolean, default False
how: string {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
tuple of (left_indexer, right_indexer)
indexers into the left_keys, right_keys
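Notes
-----
Hedged usage sketch (illustrative values, not from the original source):
left_keys = [np.array(['a', 'b', 'b'], dtype=object)]
right_keys = [np.array(['b', 'c'], dtype=object)]
_get_join_indexers(left_keys, right_keys, how='inner')
returns indexers roughly equal to (np.array([1, 2]), np.array([0, 0])),
i.e. left rows 1 and 2 are both paired with right row 0.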
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = copy.copy(kwargs)
if how == 'left':
kwargs['sort'] = sort
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
left_index=left_index,
right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = libjoin.ffill_indexer(left_indexer)
right_join_indexer = libjoin.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {
1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {
1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _asof_function(direction):
name = 'asof_join_{dir}'.format(dir=direction)
return getattr(libjoin, name, None)
def _asof_by_function(direction):
name = 'asof_join_{dir}_on_X_by_Y'.format(dir=direction)
return getattr(libjoin, name, None)
_type_casters = {
'int64_t': ensure_int64,
'double': ensure_float64,
'object': ensure_object,
}
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
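# e.g. (illustrative) _get_cython_type_upcast(np.dtype('int8')) == 'int64_t'
# and _get_cython_type_upcast(np.dtype('float32')) == 'double'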
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True,
direction='backward'):
self.by = by
self.left_by = left_by
self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
self.direction = direction
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
# we only allow 'on' to be a single key for each side
if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
if self.left_index and isinstance(self.left.index, MultiIndex):
raise MergeError("left can only have one index")
if self.right_index and isinstance(self.right.index, MultiIndex):
raise MergeError("right can only have one index")
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
raise MergeError('Can only pass by OR left_by '
'and right_by')
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError('missing left_by')
if self.left_by is not None and self.right_by is None:
raise MergeError('missing right_by')
# add 'by' to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
self.left_by = [self.left_by]
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
if len(self.left_by) != len(self.right_by):
raise MergeError('left_by and right_by must be same length')
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
# check 'direction' is valid
if self.direction not in ['backward', 'forward', 'nearest']:
raise MergeError('direction invalid: {direction}'
.format(direction=self.direction))
@property
def _asof_key(self):
""" This is our asof key, the 'on' """
return self.left_on[-1]
def _get_merge_keys(self):
# note this function has side effects
(left_join_keys,
right_join_keys,
join_names) = super(_AsOfMerge, self)._get_merge_keys()
# validate index types are the same
for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):
if not is_dtype_equal(lk.dtype, rk.dtype):
raise MergeError("incompatible merge keys [{i}] {lkdtype} and "
"{rkdtype}, must be the same type"
.format(i=i, lkdtype=lk.dtype,
rkdtype=rk.dtype))
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
lt = self.left.index
else:
lt = left_join_keys[-1]
msg = ("incompatible tolerance {tolerance}, must be compat "
"with type {lkdtype}".format(
tolerance=type(self.tolerance),
lkdtype=lt.dtype))
if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
elif is_int64_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
elif is_float_dtype(lt):
if not is_number(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
else:
raise MergeError("key must be integer, timestamp or float")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
msg = "allow_exact_matches must be boolean, passed {passed}"
raise MergeError(msg.format(passed=self.allow_exact_matches))
return left_join_keys, right_join_keys, join_names
def _get_join_indexers(self):
""" return the join indexers """
def flip(xs):
""" unlike np.transpose, this returns an array of tuples """
labels = list(string.ascii_lowercase[:len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(lzip(*xs), labeled_dtypes)
# values to compare
left_values = (self.left.index.values if self.left_index else
self.left_join_keys[-1])
right_values = (self.right.index.values if self.right_index else
self.right_join_keys[-1])
tolerance = self.tolerance
# we require sortedness and non-null values in the join keys
msg_sorted = "{side} keys must be sorted"
msg_missings = "Merge keys contain null values on {side} side"
if not Index(left_values).is_monotonic:
if isnull(left_values).any():
raise ValueError(msg_missings.format(side='left'))
else:
raise ValueError(msg_sorted.format(side='left'))
if not Index(right_values).is_monotonic:
if isnull(right_values).any():
raise ValueError(msg_missings.format(side='right'))
else:
raise ValueError(msg_sorted.format(side='right'))
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view('i8')
right_values = right_values.view('i8')
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
func = _asof_by_function(self.direction)
return func(left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance)
else:
# choose appropriate function by type
func = _asof_function(self.direction)
return func(left_values,
right_values,
self.allow_exact_matches,
tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
# check if there was already a null at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = libjoin.left_outer_join(
ensure_int64(left_key),
ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': libjoin.inner_join,
'left': libjoin.left_outer_join,
'right': _right_outer_join,
'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
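# Hedged example (illustrative, not in the original source): factorizing
# np.array(['a', 'b', 'b'], dtype=object) against
# np.array(['b', 'c'], dtype=object) with sort=True yields roughly
# llab == [0, 1, 1], rlab == [1, 2] and count == 3, i.e. both sides end up
# sharing one dense integer label space.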
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
# if we exactly match in categories, allow us to factorize on codes
if (is_categorical_dtype(lk) and
is_categorical_dtype(rk) and
lk.is_dtype_equal(rk)):
klass = libhashtable.Int64Factorizer
if lk.categories.equals(rk.categories):
rk = rk.codes
else:
# Same categories in different orders -> recode
rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)
lk = ensure_int64(lk.codes)
rk = ensure_int64(rk)
elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
klass = libhashtable.Int64Factorizer
lk = ensure_int64(com.values_from_object(lk))
rk = ensure_int64(com.values_from_object(rk))
else:
klass = libhashtable.Factorizer
lk = ensure_object(lk)
rk = ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
llength = len(left)
labels = np.concatenate([left, right])
_, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1)
new_labels = ensure_int64(new_labels)
new_left, new_right = new_labels[:llength], new_labels[llength:]
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
# how many levels can be done without overflow
pred = lambda i: not is_int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
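# Illustrative example (not in the original source): with shape == [3, 4]
# a pair of level labels (i, j) is packed into the single int64 key
# i * 4 + j, which stays collision free as long as prod(shape) fits in
# int64; any levels beyond `nlev` are folded in by the recursive call below.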
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
with np.errstate(divide='ignore'):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
def _should_fill(lname, rname):
if (not isinstance(lname, compat.string_types) or
not isinstance(rname, compat.string_types)):
return True
return lname == rname
def _any(x):
return x is not None and com._any_not_none(*x)
def validate_operand(obj):
if isinstance(obj, DataFrame):
return obj
elif isinstance(obj, Series):
if obj.name is None:
raise ValueError('Cannot merge a Series without a name')
else:
return obj.to_frame()
else:
raise TypeError('Can only merge Series or DataFrame objects, '
'a {obj} was passed'.format(obj=type(obj)))
| bsd-3-clause |
ajm/pulp | explore/management/commands/okapibm25.py | 2 | 2542 | # This file is part of PULP.
#
# PULP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PULP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PULP. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import csr_matrix
from explore.models import Article
from explore.utils import *
import numpy as np
class Command(BaseCommand) :
args = 'no arguments'
help = '(re)builds the okapibm25 matrix'
def handle(self, *args, **options) :
articles = Article.objects.all()
N = len(articles)
v = CountVectorizer(min_df=10, max_df=0.5, stop_words=get_stop_words())
self.stdout.write("Running TFIDF on %d articles... " % N, ending='\n')
self.stdout.flush()
tf = v.fit_transform(build_corpus())
self.stdout.write("Running OKAPI BM25 on %d articles... " % N, ending='\n')
self.stdout.flush()
# free parameters
k1 = 1.2 # from [1.2, 2.0]
b = 0.75
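# The matrix operations below compute, element-wise, the Okapi BM25 weight
# of term t in document D:
# score(t, D) = IDF(t) * tf(t, D) * (k1 + 1)
# / (tf(t, D) + k1 * (1 - b + b * |D| / avgdl))
# with IDF(t) = log((N - n(t) + 0.5) / (n(t) + 0.5)), where |D| is the
# document length, avgdl the mean document length over the corpus, N the
# number of documents and n(t) the number of documents containing t.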
D = tf.sum(axis=1)
# TF
tf_num = (tf * (k1 + 1))
# make sure everything is CSR
tf_tmp = csr_matrix(k1 * (1 - b + b * (D / D.mean())))
# mask to ensure matrix is sparse
tf_mask = tf.copy()
tf_mask[tf_mask != 0] = 1
tf_den = tf + tf_mask.multiply(tf_tmp)
# avoid NaN in element-wise divide
tf_num.sort_indices()
tf_den.sort_indices()
tf_num.data = np.divide(tf_num.data, tf_den.data)
# IDF
n = np.bincount(tf.nonzero()[1])
idf_term = np.log((N - n + 0.5) / (n + 0.5))
# bm25 should still be sparse
bm25 = tf_num.multiply(csr_matrix(idf_term))
print type(bm25), bm25.shape
self.stdout.write("done\n")
self.stdout.write("writing to disk...\n")
features = v.get_feature_names()
save_sparse_bm25(bm25)
save_features_bm25(dict([ (y,x) for x,y in enumerate(features) ]))
self.stdout.write("done\n")
| gpl-3.0 |
Aasmi/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
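# As an illustration of the rules above (column names taken from the
# 'datasets-UCI iris' example in the docstring): with
# col_names == ['class', 'double0'], target_name == 'class' and
# data_name == 'double0', the multiple-array branch below ends up with
# dataset['target'] = matlab_dict['class'] and
# dataset['data'] = matlab_dict['double0'].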
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
xzturn/tensorflow | tensorflow/lite/micro/examples/micro_speech/apollo3/compare_1k.py | 9 | 5012 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugging script for checking calculation values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import matplotlib.pyplot as plt
import numpy as np
# import soundfile as sf
def new_data_to_array(fn, datatype='int16'):
"""Converts file information to an in-memory array."""
vals = []
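# The input is assumed to be a text dump whose first line is a header and
# whose remaining lines hold whitespace-separated hexadecimal byte values;
# the bytes are reassembled and reinterpreted with struct.unpack using the
# requested little-endian element type.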
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
if datatype == 'int8':
typestr = 'b'
arraylen = int(len(b))
elif datatype == 'int16':
typestr = 'h'
arraylen = int(len(b) // 2)
elif datatype == 'int32':
typestr = 'i'
arraylen = int(len(b) // 4)
if datatype == 'uint8':
typestr = 'B'
arraylen = int(len(b))
elif datatype == 'uint16':
typestr = 'H'
arraylen = int(len(b) // 2)
elif datatype == 'uint32':
typestr = 'I'
arraylen = int(len(b) // 4)
y = np.array(struct.unpack('<' + typestr * arraylen, b))
return y
# x is the fixed-point input in Qm.n format
def to_float(x, n):
return x.astype(float) * 2**(-n)
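# Illustrative example (not in the original source): an int16 sample with
# raw value 16384 interpreted as Q1.15 becomes
# to_float(np.array([16384]), 15) -> array([0.5])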
micro_windowed_input = new_data_to_array(
'micro_windowed_input.txt', datatype='int32')
cmsis_windowed_input = new_data_to_array(
'cmsis_windowed_input.txt', datatype='int16')
micro_dft = new_data_to_array('micro_dft.txt', datatype='int32')
cmsis_dft = new_data_to_array('cmsis_dft.txt', datatype='int16')
py_dft = np.fft.rfft(to_float(cmsis_windowed_input, 15), n=512)
py_result = np.empty((2 * py_dft.size), dtype=np.float)
py_result[0::2] = np.real(py_dft)
py_result[1::2] = np.imag(py_dft)
micro_power = new_data_to_array('micro_power.txt', datatype='int32')
cmsis_power = new_data_to_array('cmsis_power.txt', datatype='int16')
py_power = np.square(np.abs(py_dft))
micro_power_avg = new_data_to_array('micro_power_avg.txt', datatype='uint8')
cmsis_power_avg = new_data_to_array('cmsis_power_avg.txt', datatype='uint8')
plt.figure(1)
plt.subplot(311)
plt.plot(micro_windowed_input, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_windowed_input, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_windowed_input, 30), label='Micro to float')
plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS to float')
plt.legend()
plt.figure(2)
plt.subplot(311)
plt.plot(micro_dft, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_dft, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_dft, 22), label='Micro to float')
# CMSIS result has 6 fractional bits (not 7) due to documentation error (see
# README.md)
plt.plot(to_float(cmsis_dft, 6), label='CMSIS to float')
plt.plot(py_result, label='Python result')
plt.legend()
plt.figure(3)
plt.subplot(311)
plt.plot(micro_power, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_power[0:256], label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_power, 22), label='Micro to float')
plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS to float')
plt.plot(py_power, label='Python result')
plt.legend()
plt.figure(4)
plt.plot(micro_power_avg, label='Micro fixed')
plt.plot(cmsis_power_avg, label='CMSIS fixed')
plt.legend()
plt.show()
# t = np.arange(16000.*0.03)/16000.
# # Factor of 10 because micro preprocessing overflows otherwise
# sin1k = 0.1*np.sin(2*np.pi*1000*t)
#
# plt.figure(1)
# plt.subplot(511)
# plt.plot(sin1k)
# plt.title('Input sine')
#
# plt.subplot(512)
# plt.plot(to_float(micro_windowed_input, 30), label='Micro-Lite')
# plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS')
# plt.title('Windowed sine')
# plt.legend(loc='center right')
#
# plt.subplot(513)
# plt.plot(to_float(micro_dft, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_dft, 6), label='CMSIS')
# plt.title('FFT')
# plt.legend(loc='center')
#
# plt.subplot(514)
# plt.plot(to_float(micro_power, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS')
# plt.title('|FFT|^2')
# plt.legend(loc='center right')
#
# plt.subplot(515)
# plt.plot(micro_power_avg, label='Micro-Lite')
# plt.plot(cmsis_power_avg, label='CMSIS')
# plt.title('Averaged |FFT|^2')
# plt.legend(loc='center right')
#
# plt.tight_layout(pad=0, w_pad=0.2, h_pad=0.2)
#
# plt.show()
#
| apache-2.0 |
djgagne/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
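For reference (not part of the original example), ordinary least squares
chooses the coefficients w and intercept b that minimize
sum_i (y_i - (w * x_i + b))**2
over the training samples; regr.coef_ holds the fitted slope after the call
to regr.fit below.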
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
natasasdj/OpenWPM | analysis/05_images_pixels.py | 2 | 6264 | import os
import sqlite3
import pandas as pd
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from statsmodels.distributions.empirical_distribution import ECDF
def thousands(x, pos):
if x>=1e9:
return '%dB' % (x*1e-9)
elif x>=1e6:
return '%dM' % (x*1e-6)
elif x>=1e3:
return '%dK' % (x*1e-3)
else:
return x
formatter = FuncFormatter(thousands)
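# e.g. (illustrative) thousands(2500000, None) == '2M' and
# thousands(7200, None) == '7K'; used below to shorten axis tick labels.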
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
db = res_dir + 'images.sqlite'
conn = sqlite3.connect(db)
query = 'SELECT * FROM Images'
df = pd.read_sql_query(query,conn)
df['pixels']=map(int,df['pixels'])
df['pixels'].max() #178,560,000
df['pixels'].isnull().sum() #2,797,214
df['pixels'].isnull().sum()/float(df.shape[0])*100 #8.8%
pixels = df['pixels'].fillna(-1).map(int)
def ecdf_for_plot(sample):
#x = np.linspace(min(sample), max(sample))
print "sample: ",type(sample)
x = sample.sort_values(ascending = False)
ecdf = ECDF(x)
# print ecdf
print "ecdf: ",type(ecdf)
y = ecdf(x)
#print y
print "y: ", type(y)
return (x,y)
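# ecdf_for_plot returns the sample sorted in descending order together with
# the empirical CDF evaluated at those points, ready to be passed to
# plt.step() below.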
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k/'
(x,y) = ecdf_for_plot(pixels)
plt.figure()
plt.step(x,y)
plt.title('CDF of the total number of pixels')
plt.xlabel('total number of pixels')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'04a_pix_distr.png'))
plt.show()
grouped = df.groupby('pixels')
s_pix_count = grouped.size()
s_pix_count_=s_pix_count/float(df.shape[0])*100
df_pix_count = pd.DataFrame(s_pix_count,columns=['count'])
# count of total number of pixels
fig,ax=plt.subplots()
plt.scatter(s_pix_count.index,s_pix_count,marker='.',color='darkblue')
#s_pix_count_lim = s_pix_count[s_pix_count > 0.0001*df.shape[0]]
#plt.scatter(s_pix_count_lim.index,s_pix_count_lim, marker='.',color='lightblue')
plt.xscale('symlog')
plt.yscale('log')
plt.xlabel('total number of pixels')
plt.ylabel('number of images')
plt.xlim([-1,1e8])
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_count.png',format='png')
fig.savefig(fig_dir + 'pix_count.eps',format='eps')
fig,ax=plt.subplots()
plt.scatter(s_pix_count_.index,s_pix_count_,marker='.',color='darkblue')
plt.xlabel('total number of pixels')
plt.ylabel('percentage of total number of images')
plt.xscale('symlog')
plt.yscale('log')
plt.xlim([-1,1e8])
plt.ylim([1e-6,1e2])
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_perc.png',format='png')
fig.savefig(fig_dir + 'pix_perc.eps',format='eps')
# Top 20 size counts of images
s_pix_count_sort = s_pix_count.sort_values(ascending=False)
s_pix_perc_sort = s_pix_count_sort/float(df.shape[0])*100
x=range(1,21)
labels = map(str,[ int(a) for a in list(s_pix_count_sort.index[0:20]) ])
fig, ax = plt.subplots()
plt.bar(x,s_pix_count_sort.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('count')
plt.xlabel('total number of pixels')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_count_top20.png',format='png')
fig.savefig(fig_dir + 'pix_count_top20.eps',format='eps')
x=range(1,21)
labels = map(str,[ int(a) for a in list(s_pix_perc_sort.index[0:20]) ])
fig, ax = plt.subplots()
plt.bar(x,s_pix_perc_sort.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('percentage of total number of images')
plt.xlabel('total number of pixels')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_perc_top20.png',format='png')
fig.savefig(fig_dir + 'pix_perc_top20.eps',format='eps')
#s=df['size'][df['size']!=df['cont_length']]
#l=s.tolist()
#df['pixels'].fillna(value=-100,inplace=True)
'''
grouped = df.groupby(['pixels','type'])
s_pix_type_count = grouped.size()
df_pix_type_count = pd.DataFrame(s_pix_type_count,columns=['count'])
'''
# scatter plot no of pixels vs size with the color showing count of a pixel-size pair
grouped = df.groupby(['pixels','size'])
pix_size_count = grouped.size().sort_values()
pixels = pix_size_count.index.get_level_values(level='pixels')
size = pix_size_count.index.get_level_values(level='size')
fig,ax=plt.subplots()
plt.scatter(pixels,size,c=pix_size_count,cmap="Reds", norm=LogNorm(),edgecolors='none')
cbar = plt.colorbar()
cbar.set_label('count of images')
plt.grid(True)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim([-1,1e8])
plt.ylim([-1,1e8])
plt.xlabel('total no of pixels')
plt.ylabel('file size [bytes]')
#plt.show()
fig.savefig(fig_dir + 'pix_size_count.png',format='png')
fig.savefig(fig_dir + 'pix_size_count.eps',format='eps')
fig,ax=plt.subplots()
plt.scatter(pixels,size,c=pix_size_count/float(df.shape[0])*100,cmap="Reds", norm=LogNorm(),edgecolors='none')
cbar = plt.colorbar()
cbar.set_label('percentage of total number of images')
plt.grid(True)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim([-1,1e8])
plt.ylim([-1,1e8])
plt.xlabel('total no of pixels')
plt.ylabel('file size [bytes]')
#plt.show()
fig.savefig(fig_dir + 'pix_size_perc.png',format='png')
fig.savefig(fig_dir + 'pix_size_perc.eps',format='eps')
# top 20 pixel size count
pix_size_count.sort_values(ascending = False,inplace = True)
x=range(1,21)
labels = map(str,[(int(a),int(b)) for (a,b) in pix_size_count.index[0:20]])
fig, ax = plt.subplots()
plt.bar(x,pix_size_count.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('count')
plt.xlabel('total number of pixels, file size [bytes]')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_size_count_top20.png',format='png')
fig.savefig(fig_dir + 'pix_size_count_top20.eps',format='eps')
fig, ax = plt.subplots()
plt.bar(x,pix_size_count.iloc[0:20]/float(df.shape[0])*100,align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('percentage of total number of images')
plt.xlabel('total number of pixels, file size [bytes]')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_size_perc_top20.png',format='png')
fig.savefig(fig_dir + 'pix_size_perc_top20.eps',format='eps')
| gpl-3.0 |
semiautomaticgit/SemiAutomaticClassificationPlugin | semiautomaticclassificationplugin.py | 1 | 86616 | # -*- coding: utf-8 -*-
'''
/**************************************************************************************************************************
SemiAutomaticClassificationPlugin
The Semi-Automatic Classification Plugin for QGIS allows for the supervised classification of remote sensing images,
providing tools for the download, the preprocessing and postprocessing of images.
-------------------
begin : 2012-12-29
copyright : (C) 2012-2021 by Luca Congedo
email : ing.congedoluca@gmail.com
**************************************************************************************************************************/
/**************************************************************************************************************************
*
* This file is part of Semi-Automatic Classification Plugin
*
* Semi-Automatic Classification Plugin is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software Foundation,
* version 3 of the License.
*
* Semi-Automatic Classification Plugin is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* Semi-Automatic Classification Plugin. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************************************************************/
'''
global PluginCheck
PluginCheck = 'Yes'
import os
import sys
try:
from .core import config as cfg
except:
PluginCheck = 'No'
# try importing different path
from PyQt5.QtCore import QSettings
rK = QSettings()
mPythonSettings = rK.value(cfg.regPythonModulesPathSettings, str(cfg.PythonModulesPathSettings))
if len(mPythonSettings) > 0:
for ppS in mPythonSettings.split(';'):
if len(ppS) > 0:
sys.path.insert(1, ppS)
import platform
import inspect
import shutil
import time
import datetime
import subprocess
import numpy as np
import urllib
import requests
import ssl
import smtplib
import gc
from http.cookiejar import CookieJar
import itertools
import zipfile
import tarfile
import base64
import random
import re
import xml.etree.cElementTree as ET
from xml.dom import minidom
import json
import hashlib
import ctypes
import shlex
from collections import Counter
import multiprocessing as mp
try:
mp.set_start_method('spawn')
except:
pass
from multiprocessing import Pool, Manager
# Import the PyQt libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QObject, QFileInfo, QSettings, QDir, QDate, QVariant, pyqtSignal
from PyQt5.QtWidgets import QApplication, QTreeWidgetItem
from PyQt5.QtNetwork import QNetworkRequest
# Import the QGIS libraries
import qgis.core as qgisCore
import qgis.gui as qgisGui
import qgis.utils as qgisUtils
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
# Initialize Qt ui
from .ui.resources_rc import *
from .ui.ui_semiautomaticclassificationplugin import Ui_SemiAutomaticClassificationPlugin
from .ui.ui_semiautomaticclassificationplugin_welcome import Ui_SCP_Welcome
from .ui.semiautomaticclassificationplugindialog import SemiAutomaticClassificationPluginDialog
from .ui.semiautomaticclassificationplugindialog import SpectralSignatureDialog
from .ui.semiautomaticclassificationplugindialog import WelcomeDialog
from .ui.semiautomaticclassificationplugindialog import ScatterPlotDialog
from .ui.semiautomaticclassificationplugindialog import DockClassDialog
# Import plugin version
from .__init__ import version as semiautomaticclassVersion
# required by other modules
cfg.QObjectSCP = QObject
cfg.pyqtSignalSCP = pyqtSignal
if PluginCheck == 'Yes':
try:
from .core.messages import Messages as msgs
from .core.utils import Utils
from .core.signature_importer import Signature_Importer
from .maininterface.downloadproductpointer import DownloadProductPointer
from .maininterface.downloadproducts import DownloadProducts
from .spectralsignature.spectralsignatureplot import SpectralSignaturePlot
from .spectralsignature.scatter_plot import Scatter_Plot
from .dock.manualroi import ManualROI
from .dock.regionroi import RegionROI
from .dock.scpdock import SCPDock
from .dock.classificationpreview import ClassificationPreview
from .maininterface.multipleroiTab import MultipleROITab
from .spectralsignature.usgs_spectral_lib import USGS_Spectral_Lib
from .maininterface.landsatTab import LandsatTab
from .maininterface.asterTab import ASTERTab
from .maininterface.modisTab import MODISTab
from .maininterface.sentinel1Tab import Sentinel1Tab
from .maininterface.sentinel2Tab import Sentinel2Tab
from .maininterface.sentinel3Tab import Sentinel3Tab
from .maininterface.GOESTab import GOESTab
from .maininterface.accuracy import Accuracy
from .maininterface.crossclassificationTab import CrossClassification
from .maininterface.bandcombination import BandCombination
from .maininterface.splitTab import SplitTab
from .maininterface.reprojectrasterbands import ReprojectRasterBands
from .maininterface.pcaTab import PcaTab
from .maininterface.clusteringTab import ClusteringTab
from .maininterface.classSignatureTab import ClassSignatureTab
from .maininterface.zonalStatRasterTab import ZonalStatRasterTab
from .maininterface.vectortorasterTab import VectorToRasterTab
from .maininterface.bandsetTab import BandsetTab
from .maininterface.algorithmWeightTab import AlgWeightTab
from .maininterface.signatureThresholdTab import SigThresholdTab
from .maininterface.LCSignatureThresholdTab import LCSigThresholdTab
from .maininterface.rgblistTab import RGBListTab
from .maininterface.bandsetlistTab import BandSetListTab
from .maininterface.LCSignaturePixel import LCSigPixel
from .maininterface.LCSignaturePixel2 import LCSigPixel2
from .maininterface.bandcalcTab import BandCalcTab
from .maininterface.batchTab import BatchTab
from .maininterface.clipmultiplerasters import ClipMultipleRasters
from .maininterface.stackrasterbands import StackRasterBands
from .maininterface.mosaicbandsets import MosaicBandSets
from .maininterface.cloudmasking import CloudMasking
from .maininterface.spectraldistancebandsets import SpectralDistanceBandsets
from .maininterface.randomForestTab import ClassRandomForestTab
from .maininterface.editraster import EditRaster
from .maininterface.sieveTab import SieveRaster
from .maininterface.erosionTab import ErosionRaster
from .maininterface.dilationTab import DilationRaster
from .maininterface.neighborpixelsTab import NeighborPixels
from .maininterface.clipmultiplerasterspointer import ClipMultiplerastersPointer
from .maininterface.landcoverchange import LandCoverChange
from .maininterface.classreportTab import ClassReportTab
from .maininterface.classificationTab import ClassificationTab
from .maininterface.classtovectorTab import ClassToVectorTab
from .maininterface.reclassificationTab import ReclassificationTab
from .maininterface.settings import Settings
from .core.input import Input
from .ui.ui_utils import Ui_Utils
except:
PluginCheck = 'No'
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please, restart QGIS for executing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
try:
import scipy.stats.distributions as statdistr
from scipy.spatial.distance import cdist
from scipy import signal
from scipy.ndimage import label
from scipy.cluster.vq import vq, kmeans, whiten
cfg.scipyCheck = 'Yes'
except:
cfg.scipyCheck = 'No'
try:
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as mplplt
import matplotlib.colors as mplcolors
cfg.matplotlibCheck = 'Yes'
except Exception as err:
cfg.testMatplotlibV = err
cfg.matplotlibCheck = 'No'
class SemiAutomaticClassificationPlugin:
def __init__(self, iface):
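# The cfg module is used as a shared namespace: the standard-library and
# third-party modules imported above are attached to it here so that the
# other plugin modules can reach them through cfg.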
try:
cfg.osSCP = os
cfg.sysSCP = sys
cfg.platformSCP = platform
cfg.shutilSCP = shutil
cfg.inspectSCP = inspect
cfg.timeSCP = time
cfg.datetimeSCP = datetime
cfg.subprocessSCP = subprocess
cfg.urllibSCP = urllib
cfg.requestsSCP = requests
cfg.itertoolsSCP = itertools
cfg.zipfileSCP = zipfile
cfg.tarfileSCP = tarfile
cfg.base64SCP = base64
cfg.randomSCP = random
cfg.QtCoreSCP = QtCore
cfg.QtGuiSCP = QtGui
cfg.QtWidgetsSCP = QtWidgets
cfg.QTreeWidgetItemSCP = QTreeWidgetItem
cfg.QNetworkRequestSCP = QNetworkRequest
cfg.QtSCP = Qt
cfg.QVariantSCP = QVariant
cfg.QFileInfoSCP = QFileInfo
cfg.QSettingsSCP = QSettings
cfg.QDirSCP = QDir
cfg.QDateSCP = QDate
cfg.qgisCoreSCP = qgisCore
cfg.qgisGuiSCP = qgisGui
cfg.gdalSCP = gdal
cfg.ogrSCP = ogr
cfg.osrSCP = osr
cfg.sslSCP = ssl
cfg.smtplibSCP = smtplib
cfg.CookieJarSCP = CookieJar
cfg.gcSCP = gc
cfg.reSCP = re
cfg.ETSCP = ET
cfg.minidomSCP = minidom
cfg.jsonSCP = json
cfg.hashlibSCP = hashlib
cfg.ctypesSCP = ctypes
cfg.shlexSCP = shlex
cfg.counterSCP = Counter
cfg.multiPSCP = mp
cfg.poolSCP = Pool
cfg.MultiManagerSCP = Manager
except:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please, restart QGIS for executing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
return
try:
cfg.np = np
except:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Error. Check Python Numpy installation for the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Critical)
try:
if cfg.scipyCheck == 'Yes':
cfg.statdistrSCP = statdistr
cfg.cdistSCP = cdist
cfg.signalSCP = signal
cfg.labelSCP = label
cfg.vqSCP = vq
cfg.kmeansSCP = kmeans
cfg.whitenSCP = whiten
if cfg.matplotlibCheck == 'Yes':
cfg.MaxNLocatorSCP = MaxNLocator
cfg.mplpltSCP = mplplt
cfg.mplcolorsSCP = mplcolors
except:
pass
if cfg.scipyCheck == 'No':
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Error. Check Python Scipy installation for the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Critical)
if cfg.matplotlibCheck == 'No':
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Error. Check Python Matplotlib installation for the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Critical)
if PluginCheck == 'Yes':
# reference to QGIS interface
cfg.iface = iface
# reference to map canvas
cfg.cnvs = iface.mapCanvas()
# create the dialog
cfg.dlg = SemiAutomaticClassificationPluginDialog()
# reference to ui
cfg.ui = cfg.dlg.ui
# class dock dialog
cfg.dockclassdlg = DockClassDialog(cfg.iface.mainWindow(), cfg.iface)
# reference dock class ui
cfg.uidc = cfg.dockclassdlg.ui
# welcome dialog
cfg.welcomedlg = WelcomeDialog()
# spectral signature plot dialog
cfg.spectralplotdlg = SpectralSignatureDialog()
cfg.uisp = cfg.spectralplotdlg.ui
# scatter plot dialog
cfg.scatterplotdlg = ScatterPlotDialog()
cfg.uiscp = cfg.scatterplotdlg.ui
cfg.mx = msgs(cfg.iface)
cfg.utls = Utils()
cfg.SCPD = SCPDock()
cfg.classPrev = ClassificationPreview(cfg.cnvs)
cfg.spSigPlot = SpectralSignaturePlot()
cfg.scaPlT = Scatter_Plot()
cfg.multiROI = MultipleROITab()
cfg.usgsLib = USGS_Spectral_Lib()
cfg.acc = Accuracy()
cfg.crossC = CrossClassification()
cfg.bsComb = BandCombination()
cfg.splitT = SplitTab()
cfg.rprjRstBndsT = ReprojectRasterBands()
cfg.pcaT = PcaTab()
cfg.clusteringT = ClusteringTab()
cfg.classSigT = ClassSignatureTab()
cfg.znlSttRstT = ZonalStatRasterTab()
cfg.vctRstrT = VectorToRasterTab()
cfg.bst = BandsetTab()
cfg.algWT = AlgWeightTab()
cfg.signT = SigThresholdTab()
cfg.LCSignT = LCSigThresholdTab()
cfg.RGBLT = RGBListTab()
cfg.bstLT = BandSetListTab()
cfg.bCalc = BandCalcTab()
cfg.batchT= BatchTab()
cfg.clipMulti = ClipMultipleRasters()
cfg.stackRstr = StackRasterBands()
cfg.mosaicBS = MosaicBandSets()
cfg.cloudMsk = CloudMasking()
cfg.spclDstBS = SpectralDistanceBandsets()
cfg.rndmFrst = ClassRandomForestTab()
cfg.editRstr = EditRaster()
cfg.sieveRstr = SieveRaster()
cfg.ersnRstr = ErosionRaster()
cfg.dltnRstr = DilationRaster()
cfg.clssNghbr = NeighborPixels()
cfg.downProd = DownloadProducts()
cfg.landsatT = LandsatTab()
cfg.ASTERT = ASTERTab()
cfg.MODIST = MODISTab()
cfg.sentinel1T = Sentinel1Tab()
cfg.sentinel2T = Sentinel2Tab()
cfg.sentinel3T = Sentinel3Tab()
cfg.goesT = GOESTab()
cfg.landCC = LandCoverChange()
cfg.classRep = ClassReportTab()
cfg.classTab = ClassificationTab()
cfg.classVect = ClassToVectorTab()
cfg.reclassification = ReclassificationTab()
cfg.sigImport = Signature_Importer()
cfg.mnlROI = ManualROI(cfg.cnvs)
cfg.regionROI = RegionROI(cfg.cnvs)
cfg.dwnlPrdPnt = DownloadProductPointer(cfg.cnvs)
cfg.clipMultiP = ClipMultiplerastersPointer(cfg.cnvs)
cfg.LCSPixel = LCSigPixel(cfg.cnvs)
cfg.LCSPixel2 = LCSigPixel2(cfg.cnvs)
cfg.sets = Settings()
cfg.uiUtls = Ui_Utils()
cfg.ipt = Input()
# connect when map is clicked
cfg.mnlROI.rightClicked.connect(cfg.SCPD.clckR)
cfg.mnlROI.leftClicked.connect(cfg.SCPD.clckL)
cfg.mnlROI.moved.connect(cfg.SCPD.movedPointer)
cfg.regionROI.ROIleftClicked.connect(cfg.SCPD.pointerClickROI)
cfg.regionROI.ROIrightClicked.connect(cfg.SCPD.pointerRightClickROI)
cfg.regionROI.moved.connect(cfg.SCPD.movedPointer)
cfg.clipMultiP.leftClicked.connect(cfg.clipMulti.pointerLeftClick)
cfg.clipMultiP.rightClicked.connect(cfg.clipMulti.pointerRightClick)
cfg.dwnlPrdPnt.leftClicked.connect(cfg.downProd.pointerLeftClick)
cfg.dwnlPrdPnt.rightClicked.connect(cfg.downProd.pointerRightClick)
cfg.classPrev.leftClicked.connect(cfg.SCPD.pointerClickPreview)
cfg.classPrev.rightClicked.connect(cfg.SCPD.pointerRightClickPreview)
cfg.LCSPixel.MaprightClicked.connect(cfg.LCSignT.pointerLeftClick)
cfg.LCSPixel.MapleftClicked.connect(cfg.LCSignT.pointerLeftClick)
cfg.LCSPixel2.MaprightClicked.connect(cfg.spSigPlot.pointerLeftClick)
cfg.LCSPixel2.MapleftClicked.connect(cfg.spSigPlot.pointerLeftClick)
# system variables
cfg.utls.findSystemSpecs()
cfg.utls.readVariables()
# set font
try:
f, s, i = cfg.utls.readQGISVariableFont()
font = cfg.QtGuiSCP.QFont()
font.setFamily(f)
font.setPointSize(int(s))
cfg.dlg.setFont(font)
cfg.ui.menu_treeWidget.setFont(font)
except:
pass
# initialize plugin directory
cfg.plgnDir = cfg.QFileInfoSCP(cfg.qgisCoreSCP.QgsApplication.qgisUserDatabaseFilePath()).path() + '/python/plugins/' + str(__name__).split('.')[0]
# locale name
lclNm = cfg.QSettingsSCP().value('locale/userLocale')[0:2]
self.registryKeys()
if len(cfg.PythonPathSettings) > 0:
mp.set_executable(cfg.PythonPathSettings)
# temporary directory
tmpDir = cfg.utls.getTempDirectory()
cfg.ui.temp_directory_label.setText(tmpDir)
# log file path
cfg.logFile = cfg.tmpDir.replace('//', '/') + '/__0semiautomaticclass.log'
# locale
lclPth = ''
if cfg.QFileInfoSCP(cfg.plgnDir).exists():
lclPth = cfg.plgnDir + '/i18n/semiautomaticclassificationplugin_' + lclNm + '.qm'
if cfg.QFileInfoSCP(lclPth).exists():
trnsltr = cfg.QtCoreSCP.QTranslator()
trnsltr.load(lclPth)
if cfg.QtCoreSCP.qVersion() > '4.3.3':
cfg.QtCoreSCP.QCoreApplication.installTranslator(trnsltr)
# info
cfg.sysSCPInfo = str(' SemiAutomaticClass ' + semiautomaticclassVersion() + ' - QGIS v. ' + str(cfg.QGISVer) + ' L:' + lclNm + ' - OS ' + str(cfg.sysSCPNm) + ' - 64bit =' + cfg.sysSCP64bit)
# multiprocess Windows
if cfg.sysSCPNm == 'Windows':
mp.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
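# (assumption, not stated in the original source) pythonw.exe is presumably
# used here so that spawned worker processes run with the bundled
# interpreter without opening extra console windows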
# Mac OS
elif cfg.sysSCPNm == 'Darwin':
dPref = os.environ['PATH'].split(':')
for flPref in dPref:
flPrefPy = os.path.join(flPref, 'python3')
# first test
if os.path.isfile(flPrefPy):
mp.set_executable(flPrefPy)
cfg.sysSCPInfo = cfg.sysSCPInfo + ' - python path =' + flPrefPy
# second test
if 'library' in flPref.lower():
if os.path.isfile(flPrefPy):
mp.set_executable(flPrefPy)
cfg.sysSCPInfo = cfg.sysSCPInfo + ' - python path =' + flPrefPy
break
# GDAL config
try:
cfg.gdalSCP.SetConfigOption('GDAL_NUM_THREADS', str(cfg.threads))
cfg.gdalSCP.SetCacheMax(int(cfg.RAMValue * 0.3 * 1000000))
cfg.gdalSCP.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'TRUE')
cfg.gdalSCP.SetConfigOption('GDAL_CACHEMAX', '4')
cfg.gdalSCP.SetConfigOption('VSI_CACHE', 'FALSE')
except:
pass
# read registry keys
def registryKeys(self):
''' registry keys '''
cfg.firstInstallVal = cfg.utls.readRegistryKeys(cfg.regFirstInstall, cfg.firstInstallVal)
cfg.logSetVal = cfg.utls.readRegistryKeys(cfg.regLogKey, cfg.logSetVal)
cfg.downNewsVal = cfg.utls.readRegistryKeys(cfg.downNewsKey, cfg.downNewsVal)
cfg.vrtRstProjVal = cfg.utls.readRegistryKeys(cfg.vrtRstProjKey, cfg.vrtRstProjVal)
cfg.ROIClrVal = cfg.utls.readRegistryKeys(cfg.regROIClr, cfg.ROIClrVal)
cfg.ROITrnspVal = int(cfg.utls.readRegistryKeys(cfg.regROITransp, cfg.ROITrnspVal))
cfg.outTempRastFormat = cfg.utls.readRegistryKeys(cfg.regTempRasterFormat, str(cfg.outTempRastFormat))
cfg.rasterCompression = cfg.utls.readRegistryKeys(cfg.regRasterCompression, str(cfg.rasterCompression))
cfg.parallelWritingCheck = cfg.utls.readRegistryKeys(cfg.regparallelWritingCheck, str(cfg.parallelWritingCheck))
cfg.RAMValue = int(cfg.utls.readRegistryKeys(cfg.regRAMValue, str(cfg.RAMValue)))
cfg.threads = int(cfg.utls.readRegistryKeys(cfg.regThreadsValue, str(cfg.threads)))
cfg.gdalPath = cfg.utls.readRegistryKeys(cfg.regGDALPathSettings, str(cfg.gdalPath))
cfg.PythonPathSettings = cfg.utls.readRegistryKeys(cfg.regPythonPathSettings, str(cfg.PythonPathSettings))
cfg.PythonModulesPathSettings = cfg.utls.readRegistryKeys(cfg.regPythonModulesPathSettings, str(cfg.PythonModulesPathSettings))
cfg.tmpDir = cfg.utls.readRegistryKeys(cfg.regTmpDir, cfg.tmpDir)
cfg.fldID_class = cfg.utls.readRegistryKeys(cfg.regIDFieldName, cfg.fldID_class)
cfg.fldMacroID_class = cfg.utls.readRegistryKeys(cfg.regMacroIDFieldName, cfg.fldMacroID_class)
cfg.macroclassCheck = cfg.utls.readRegistryKeys(cfg.regConsiderMacroclass, cfg.macroclassCheck)
cfg.sentinelAlternativeSearch = cfg.utls.readRegistryKeys(cfg.regSentinelAlternativeSearch, cfg.sentinelAlternativeSearch)
cfg.LCsignatureCheckBox = cfg.utls.readRegistryKeys(cfg.regLCSignature, cfg.LCsignatureCheckBox)
cfg.fldROI_info = cfg.utls.readRegistryKeys(cfg.regInfoFieldName, cfg.fldROI_info)
cfg.fldROIMC_info = cfg.utls.readRegistryKeys(cfg.regMCInfoFieldName, cfg.fldROIMC_info)
cfg.variableName = cfg.utls.readRegistryKeys(cfg.regVariableName, cfg.variableName)
cfg.vectorVariableName = cfg.utls.readRegistryKeys(cfg.regVectorVariableName, cfg.vectorVariableName)
cfg.SMTPCheck = cfg.utls.readRegistryKeys(cfg.regSMTPCheck, cfg.SMTPCheck)
cfg.SMTPServer = cfg.utls.readRegistryKeys(cfg.regSMTPServer, cfg.SMTPServer)
cfg.SMTPtoEmails = cfg.utls.readRegistryKeys(cfg.regSMTPtoEmails, cfg.SMTPtoEmails)
cfg.SMTPUser = cfg.utls.readRegistryKeys(cfg.regSMTPUser, cfg.SMTPUser)
cfg.SMTPPassword = cfg.utls.readRegistryKeys(cfg.regSMTPPassword, cfg.SMTPPassword)
cfg.USGSUser = cfg.utls.readRegistryKeys(cfg.regUSGSUser, cfg.USGSUser)
cfg.USGSPass = cfg.utls.readRegistryKeys(cfg.regUSGSPass, cfg.USGSPass)
cfg.USGSUserASTER = cfg.utls.readRegistryKeys(cfg.regUSGSUserASTER, cfg.USGSUserASTER)
cfg.USGSPassASTER = cfg.utls.readRegistryKeys(cfg.regUSGSPassASTER, cfg.USGSPassASTER)
cfg.SciHubUser = cfg.utls.readRegistryKeys(cfg.regSciHubUser, cfg.SciHubUser)
cfg.SciHubService = cfg.utls.readRegistryKeys(cfg.regSciHubService, cfg.SciHubService)
cfg.SciHubPass = cfg.utls.readRegistryKeys(cfg.regSciHubPass, cfg.SciHubPass)
cfg.sigPLRoundCharList = cfg.roundCharList
cfg.scatPlRoundCharList = cfg.roundCharList
cfg.grpNm = cfg.utls.readRegistryKeys(cfg.regGroupName, cfg.grpNm)
cfg.rasterDataType = cfg.utls.readRegistryKeys(cfg.regRasterDataType, cfg.rasterDataType)
cfg.expressionListBC = cfg.utls.readRegistryKeys(cfg.regExpressionListBC, cfg.expressionListBC)
cfg.soundVal = cfg.utls.readRegistryKeys(cfg.regSound, cfg.soundVal)
cfg.windowSizeW = cfg.utls.readRegistryKeys(cfg.regWindowSizeW, cfg.windowSizeW)
cfg.windowSizeH = cfg.utls.readRegistryKeys(cfg.regWindowSizeH, cfg.windowSizeH)
cfg.splitterSizeS = cfg.utls.readRegistryKeys(cfg.regSplitterSizeS, cfg.splitterSizeS)
def initGui(self):
if PluginCheck == 'Yes':
try:
cfg.iface.addDockWidget(cfg.QtSCP.LeftDockWidgetArea, cfg.dockclassdlg)
except:
msg = ''
try:
import scipy.stats.distributions as statdistr
except:
msg = 'SciPy'
try:
from matplotlib.ticker import MaxNLocator
except:
msg = 'Matplotlib'
try:
import numpy as np
except:
msg = 'NumPy'
try:
from osgeo import gdal
except:
msg = 'Gdal'
if len(msg) > 0:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Semi-Automatic Classification Plugin possible missing dependencies: ' + msg), level=qgisCore.Qgis.Info)
else:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please restart QGIS for installing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
return
from .modules.modules import Modules
cfg.SCPModules = Modules()
cfg.SCPModules.loading()
cfg.ipt.loadInputToolbar()
cfg.algName = cfg.algMinDist
cfg.ui.algorithm_combo.setCurrentIndex(0)
# vector to raster type of conversion
cfg.ui.conversion_type_combo.addItem(cfg.convCenterPixels)
cfg.ui.conversion_type_combo.addItem(cfg.convAllPixelsTouch)
cfg.centerOfPixels = cfg.ui.conversion_type_combo.itemText(0)
''' menu '''
cfg.ipt.loadMenu()
# set plugin version
cfg.ui.plugin_version_label.setText(semiautomaticclassVersion())
cfg.uidc.plugin_version_label2.setText('SCP ' + semiautomaticclassVersion())
# row height
cfg.ui.download_images_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.tableWidget_band_calc.verticalHeader().setDefaultSectionSize(24)
cfg.ui.landsat_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.sentinel_2_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.sentinel_2_tableWidget, [[0, 400], [1, 200], [2, 60]])
cfg.ui.ASTER_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.ASTER_tableWidget, [[0, 400], [1, 200], [2, 60]])
cfg.ui.MODIS_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.MODIS_tableWidget, [[0, 400], [1, 200], [2, 60]])
cfg.ui.LCS_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.signature_threshold_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.point_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.log_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.log_tableWidget, [[0, 100], [1, 200], [2, 800]])
# spectral signature plot list
cfg.utls.insertTableColumn(cfg.uisp.signature_list_plot_tableWidget, 6, cfg.tableColString, None, 'Yes')
cfg.utls.sortTableColumn(cfg.uisp.signature_list_plot_tableWidget, 3)
cfg.utls.setColumnWidthList(cfg.uisp.signature_list_plot_tableWidget, [[0, 30], [1, 40], [2, 100], [3, 40], [4, 100], [5, 30]])
try:
cfg.uisp.signature_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(2, cfg.QtWidgetsSCP.QHeaderView.Stretch)
cfg.uisp.signature_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(4, cfg.QtWidgetsSCP.QHeaderView.Stretch)
except:
pass
cfg.SCPD.clearTree()
# passwords
cfg.ui.smtp_password_lineEdit.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
cfg.ui.password_usgs_lineEdit.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
cfg.ui.password_usgs_lineEdit_2.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
cfg.ui.password_scihub_lineEdit.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
# scatter plot list
cfg.utls.insertTableColumn(cfg.uiscp.scatter_list_plot_tableWidget, 6, cfg.tableColString, None, 'Yes')
cfg.utls.sortTableColumn(cfg.uiscp.scatter_list_plot_tableWidget, 3)
cfg.utls.setColumnWidthList(cfg.uiscp.scatter_list_plot_tableWidget, [[0, 30], [1, 40], [2, 100], [3, 40], [4, 100], [5, 30]])
try:
cfg.uiscp.scatter_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(2, cfg.QtWidgetsSCP.QHeaderView.Stretch)
cfg.uiscp.scatter_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(4, cfg.QtWidgetsSCP.QHeaderView.Stretch)
except:
pass
# signature threshold
cfg.utls.insertTableColumn(cfg.ui.signature_threshold_tableWidget, 7, cfg.tableColString, None, 'Yes')
cfg.utls.setColumnWidthList(cfg.ui.signature_threshold_tableWidget, [[4, 100], [5, 100], [6, 100]])
try:
cfg.ui.signature_threshold_tableWidget.horizontalHeader().setSectionResizeMode(1, cfg.QtWidgetsSCP.QHeaderView.Stretch)
cfg.ui.signature_threshold_tableWidget.horizontalHeader().setSectionResizeMode(3, cfg.QtWidgetsSCP.QHeaderView.Stretch)
except:
pass
# product download tab
cfg.utls.setColumnWidthList(cfg.ui.download_images_tableWidget, [[0, 100], [1, 400]])
# USGS spectral library
cfg.usgsLib.addSpectralLibraryToCombo(cfg.usgs_lib_list)
cfg.usgs_C1p = cfg.plgnDir + '/' + cfg.usgs_C1p
cfg.usgs_C2p = cfg.plgnDir + '/' + cfg.usgs_C2p
cfg.usgs_C3p = cfg.plgnDir + '/' + cfg.usgs_C3p
cfg.usgs_C4p = cfg.plgnDir + '/' + cfg.usgs_C4p
cfg.usgs_C5p = cfg.plgnDir + '/' + cfg.usgs_C5p
cfg.usgs_C6p = cfg.plgnDir + '/' + cfg.usgs_C6p
cfg.usgs_C7p = cfg.plgnDir + '/' + cfg.usgs_C7p
# band calc expression
cfg.bCalc.createExpressionList(cfg.expressionListBC)
cfg.batchT.addFunctionsToTable(cfg.functionNames)
cfg.bst.addSatelliteToCombo(cfg.satWlList)
cfg.downProd.addSatelliteToCombo(cfg.downProductList)
cfg.scaPlT.addColormapToCombo(cfg.scatterColorMap)
cfg.bst.addUnitToCombo(cfg.unitList)
cfg.SCPD.previewSize()
# set log state
if cfg.logSetVal == 'Yes':
cfg.ui.log_checkBox.setCheckState(2)
cfg.mx.msg19()
elif cfg.logSetVal == 'No':
cfg.ui.log_checkBox.setCheckState(0)
# set download news state
cfg.ui.download_news_checkBox.setCheckState(int(cfg.downNewsVal))
# set virtual raster load state
cfg.ui.virtual_raster_load_checkBox.setCheckState(int(cfg.vrtRstProjVal))
# set raster format
if cfg.outTempRastFormat == 'VRT':
cfg.ui.virtual_raster_checkBox.setCheckState(2)
elif cfg.outTempRastFormat == 'GTiff':
cfg.ui.virtual_raster_checkBox.setCheckState(0)
# set raster compression
if cfg.rasterCompression == 'Yes':
cfg.ui.raster_compression_checkBox.setCheckState(2)
elif cfg.rasterCompression == 'No':
cfg.ui.raster_compression_checkBox.setCheckState(0)
# set parallel writing
if cfg.parallelWritingCheck == 'Yes':
cfg.ui.parallel_writing_checkBox.setCheckState(2)
elif cfg.parallelWritingCheck == 'No':
cfg.ui.parallel_writing_checkBox.setCheckState(0)
# set SMTP checkbox state
cfg.ui.smtp_checkBox.setCheckState(int(cfg.SMTPCheck))
# set sound state
cfg.ui.sound_checkBox.setCheckState(int(cfg.soundVal))
# connect to project loaded
cfg.qgisCoreSCP.QgsProject.instance().readProject.connect(self.projectLoaded)
cfg.qgisCoreSCP.QgsProject.instance().projectSaved.connect(self.projectSaved)
cfg.iface.newProjectCreated.connect(self.newProjectLoaded)
#cfg.qgisCoreSCP.QgsProject.instance().readMapLayer.connect(self.test)
#cfg.qgisCoreSCP.QgsProject.instance().layerLoaded.connect(self.test)
''' Help tab '''
cfg.utls.makeDirectory(cfg.tmpDir + '/_images/')
cfg.ui.help_textBrowser.setSearchPaths([cfg.tmpDir])
''' Docks '''
# set ROI color
cfg.ui.change_color_Button.setStyleSheet('background-color :' + cfg.ROIClrVal)
# set ROI transparency
cfg.ui.transparency_Slider.setValue(cfg.ROITrnspVal)
# set RAM value
cfg.ui.RAM_spinBox.setValue(cfg.RAMValue)
# set CPU value
cfg.ui.CPU_spinBox.setValue(cfg.threads)
# macroclass checkbox
if cfg.macroclassCheck == 'No':
cfg.ui.macroclass_checkBox.setCheckState(0)
cfg.ui.class_checkBox.blockSignals(True)
cfg.ui.class_checkBox.setCheckState(2)
cfg.ui.class_checkBox.blockSignals(False)
elif cfg.macroclassCheck == 'Yes':
cfg.ui.macroclass_checkBox.setCheckState(2)
cfg.ui.class_checkBox.blockSignals(True)
cfg.ui.class_checkBox.setCheckState(0)
cfg.ui.class_checkBox.blockSignals(False)
# macroclass checkbox (random forest)
if cfg.macroclassCheckRF == 'No':
cfg.ui.macroclass_checkBox_rf.setCheckState(0)
cfg.ui.class_checkBox_rf.blockSignals(True)
cfg.ui.class_checkBox_rf.setCheckState(2)
cfg.ui.class_checkBox_rf.blockSignals(False)
elif cfg.macroclassCheckRF == 'Yes':
cfg.ui.macroclass_checkBox_rf.setCheckState(2)
cfg.ui.class_checkBox_rf.blockSignals(True)
cfg.ui.class_checkBox_rf.setCheckState(0)
cfg.ui.class_checkBox_rf.blockSignals(False)
# LC signature checkbox
if cfg.LCsignatureCheckBox == 'No':
cfg.ui.LC_signature_checkBox.setCheckState(0)
elif cfg.LCsignatureCheckBox == 'Yes':
cfg.ui.LC_signature_checkBox.setCheckState(2)
try:
# set SMTP server
cfg.ui.smtp_server_lineEdit.setText(cfg.SMTPServer)
# set SMTP to emails
cfg.ui.to_email_lineEdit.setText(cfg.SMTPtoEmails)
# set SMTP user and password
cfg.ui.smtp_user_lineEdit.setText(cfg.SMTPUser)
if cfg.SMTPPassword is not None:
SMTPPsw = cfg.utls.decryptPassword(cfg.SMTPPassword[2:-1])
cfg.ui.smtp_password_lineEdit.setText(str(SMTPPsw)[2:-1])
cfg.SMTPPassword = str(SMTPPsw)[2:-1]
# set USGS user and password
cfg.ui.user_usgs_lineEdit.setText(cfg.USGSUser)
if cfg.USGSPass is not None:
USGSPsw = cfg.utls.decryptPassword(cfg.USGSPass[2:-1])
cfg.ui.password_usgs_lineEdit.setText(str(USGSPsw)[2:-1])
cfg.ui.user_usgs_lineEdit_2.setText(cfg.USGSUserASTER)
if cfg.USGSPassASTER is not None:
USGSPsw2 = cfg.utls.decryptPassword(cfg.USGSPassASTER[2:-1])
cfg.ui.password_usgs_lineEdit_2.setText(str(USGSPsw2)[2:-1])
# set SciHub user and password
cfg.ui.sentinel_service_lineEdit.setText(cfg.SciHubService)
cfg.ui.user_scihub_lineEdit.setText(cfg.SciHubUser)
if cfg.SciHubPass is not None:
sciHubPsw = cfg.utls.decryptPassword(cfg.SciHubPass[2:-1])
cfg.ui.password_scihub_lineEdit.setText(str(sciHubPsw)[2:-1])
except Exception as err:
# logger
cfg.utls.logCondition(str(__name__) + '-' + (cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), ' ERROR exception: ' + str(err))
cfg.ui.sentinel2_alternative_search_checkBox.blockSignals(True)
cfg.ui.sentinel2_alternative_search_checkBox.setCheckState(int(cfg.sentinelAlternativeSearch))
cfg.ui.sentinel2_alternative_search_checkBox.blockSignals(False)
''' SCP tab '''
cfg.ui.SCP_tabs.currentChanged.connect(cfg.ipt.SCPTabChanged)
cfg.ui.main_tabWidget.currentChanged.connect(cfg.ipt.mainTabChanged)
# hide tabs
cfg.ui.SCP_tabs.setStyleSheet('QTabBar::tab {padding: 0px; max-height: 0px;}')
# set window size
cfg.dlg.resize(int(cfg.windowSizeW), int(cfg.windowSizeH))
cfg.ui.widget.setMinimumSize(cfg.QtCoreSCP.QSize(50, 0))
cfg.ui.widget.setMaximumSize(cfg.QtCoreSCP.QSize(400, 16777215))
cfg.ui.splitter.setSizes(eval(cfg.splitterSizeS))
cfg.ui.splitter.splitterMoved.connect(cfg.ipt.movedSplitter)
cfg.ui.menu_treeWidget.itemSelectionChanged.connect(cfg.ipt.menuIndex)
cfg.ui.f_filter_lineEdit.textChanged.connect(cfg.ipt.filterTree)
''' Multiple ROI tab '''
# connect to add point
cfg.ui.add_point_pushButton.clicked.connect(cfg.multiROI.addPointToTable)
# connect to create random points
cfg.ui.add_random_point_pushButton.clicked.connect(cfg.multiROI.createRandomPoint)
# connect to remove point
cfg.ui.remove_point_pushButton.clicked.connect(cfg.multiROI.removePointFromTable)
# connect to save point ROIs
cfg.ui.save_point_rois_pushButton.clicked.connect(cfg.multiROI.createROIfromPoint)
# connect to import points
cfg.ui.import_point_list_pushButton.clicked.connect(cfg.multiROI.importPoints)
# connect to export point list
cfg.ui.export_point_list_pushButton.clicked.connect(cfg.multiROI.exportPointList)
# connect the signature calculation checkBox 2
cfg.ui.signature_checkBox2.stateChanged.connect(cfg.multiROI.signatureCheckbox2)
# connect to text changed
cfg.ui.stratified_lineEdit.textChanged.connect(cfg.multiROI.textChanged)
''' Import spectral signature tab '''
# connect the import library
cfg.ui.open_library_pushButton.clicked.connect(cfg.SCPD.openLibraryFile)
# connect the open shapefile
cfg.ui.open_shapefile_pushButton.clicked.connect(cfg.sigImport.openShapefileI)
# connect the import shapefile
cfg.ui.import_shapefile_pushButton.clicked.connect(cfg.utls.importShapefile)
# connect the chapter changed
cfg.ui.usgs_chapter_comboBox.currentIndexChanged.connect(cfg.usgsLib.chapterChanged)
# connect the library changed
cfg.ui.usgs_library_comboBox.currentIndexChanged.connect(cfg.usgsLib.libraryChanged)
# connect to add USGS library signature to list
cfg.ui.add_usgs_library_pushButton.clicked.connect(cfg.usgsLib.addSignatureToList)
''' Export spectral signature tab '''
# connect to export signature to SCP file
cfg.ui.export_SCP_pushButton.clicked.connect(cfg.SCPD.exportSignatureFile)
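# connect to export signature to shapefile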
cfg.ui.export_SHP_pushButton.clicked.connect(cfg.SCPD.exportSignatureShapefile)
# connect to export signature to CSV
cfg.ui.export_CSV_library_toolButton.clicked.connect(cfg.SCPD.exportToCSVLibrary)
''' Algorithm weight tab '''
cfg.ui.reset_weights_pushButton.clicked.connect(cfg.algWT.resetWeights)
cfg.ui.set_weight_value_pushButton.clicked.connect(cfg.algWT.setWeights)
''' Signature threshold tab '''
# edited cell
cfg.ui.signature_threshold_tableWidget.cellChanged.connect(cfg.signT.editedThresholdTable)
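# connect to reset, automatic and set threshold buttons, and column sorting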
cfg.ui.reset_threshold_pushButton.clicked.connect(cfg.signT.resetThresholds)
cfg.ui.automatic_threshold_pushButton.clicked.connect(cfg.signT.setAllWeightsVariance)
cfg.ui.set_threshold_value_pushButton.clicked.connect(cfg.signT.setThresholds)
cfg.ui.signature_threshold_tableWidget.horizontalHeader().sectionClicked.connect(cfg.signT.orderedTable)
''' LC Signature threshold tab '''
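# connect to edited cell, column sorting and automatic threshold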
cfg.ui.LCS_tableWidget.cellChanged.connect(cfg.LCSignT.editedThresholdTable)
cfg.ui.LCS_tableWidget.horizontalHeader().sectionClicked.connect(cfg.LCSignT.orderedTable)
cfg.ui.automatic_threshold_pushButton_2.clicked.connect(cfg.LCSignT.setAllWeightsVariance)
# connect to activate pointer
cfg.ui.LCS_pointerButton.clicked.connect(cfg.LCSignT.pointerActive)
cfg.ui.LCS_ROI_button.clicked.connect(cfg.LCSignT.ROIThreshold)
cfg.ui.set_min_max_Button.clicked.connect(cfg.LCSignT.setMinimumMaximum)
# connect the include signature checkBox
cfg.ui.LCS_include_checkBox.stateChanged.connect(cfg.LCSignT.includeCheckbox)
cfg.ui.LCS_cut_checkBox.stateChanged.connect(cfg.LCSignT.cutCheckbox)
# add to spectral signature plot
cfg.ui.signature_spectral_plot_toolButton_2.clicked.connect(cfg.LCSignT.addSignatureToSpectralPlot)
''' RGB List tab '''
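# connect to edited cell and RGB list buttons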
cfg.ui.RGB_tableWidget.cellChanged.connect(cfg.RGBLT.editedTable)
cfg.ui.add_RGB_pushButton.clicked.connect(cfg.RGBLT.addRGBToTable)
cfg.ui.remove_RGB_toolButton.clicked.connect(cfg.RGBLT.removeRGBFromTable)
cfg.ui.sort_by_name_toolButton_2.clicked.connect(cfg.RGBLT.sortRGBName)
cfg.ui.clear_RGB_list_toolButton.clicked.connect(cfg.RGBLT.clearTableAction)
cfg.ui.move_up_toolButton_3.clicked.connect(cfg.RGBLT.moveUpRGB)
cfg.ui.move_down_toolButton_3.clicked.connect(cfg.RGBLT.moveDownRGB)
cfg.ui.all_RGB_list_toolButton.clicked.connect(cfg.RGBLT.allRGBListAction)
cfg.ui.export_RGB_List_toolButton.clicked.connect(cfg.RGBLT.exportRGBList)
cfg.ui.import_RGB_List_toolButton.clicked.connect(cfg.RGBLT.importRGB)
''' Band set List tab '''
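# connect to band set list buttons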
cfg.ui.add_bandset_pushButton.clicked.connect(cfg.bstLT.addBandSetToTable)
cfg.ui.rgb_toolButton.clicked.connect(cfg.bstLT.displayRGB)
cfg.ui.remove_bandset_toolButton.clicked.connect(cfg.bstLT.removeBandSetFromTable)
cfg.ui.move_up_toolButton_4.clicked.connect(cfg.bstLT.moveUpBandset)
cfg.ui.move_down_toolButton_4.clicked.connect(cfg.bstLT.moveDownBandset)
# connect to double click
cfg.ui.band_set_list_tableWidget.doubleClicked.connect(cfg.bstLT.doubleClick)
cfg.ui.export_bandset_List_toolButton.clicked.connect(cfg.bstLT.exportList)
cfg.ui.import_bandset_List_toolButton.clicked.connect(cfg.bstLT.importList)
# connect to filter
cfg.ui.band_set_filter_lineEdit.textChanged.connect(cfg.bstLT.filterTable)
''' Download product tab '''
# connect to find images button
cfg.ui.find_images_toolButton.clicked.connect(cfg.downProd.findImages)
cfg.ui.selectUL_toolButton_3.clicked.connect(cfg.downProd.pointerActive)
# connect to display button
cfg.ui.toolButton_display.clicked.connect(cfg.downProd.displayImages)
cfg.ui.toolButton_OSM.clicked.connect(cfg.downProd.displayOSM)
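# connect to table management and download buttons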
cfg.ui.remove_image_toolButton.clicked.connect(cfg.downProd.removeImageFromTable)
cfg.ui.clear_table_toolButton.clicked.connect(cfg.downProd.clearTable)
cfg.ui.download_images_Button.clicked.connect(cfg.downProd.downloadImages)
cfg.ui.export_links_Button.clicked.connect(cfg.downProd.exportLinks)
cfg.ui.import_table_pushButton.clicked.connect(cfg.downProd.importTableText)
cfg.ui.export_table_pushButton.clicked.connect(cfg.downProd.exportTableText)
cfg.ui.check_toolButton.clicked.connect(cfg.downProd.checkAllBands)
cfg.ui.show_area_radioButton_2.clicked.connect(cfg.downProd.showHideArea)
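# connect to credential fields and remember user checkboxes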
cfg.ui.remember_user_checkBox_2.stateChanged.connect(cfg.downProd.rememberUserCheckbox)
cfg.ui.user_usgs_lineEdit.editingFinished.connect(cfg.downProd.rememberUser)
cfg.ui.password_usgs_lineEdit.editingFinished.connect(cfg.downProd.rememberUser)
cfg.ui.reset_sentinel_service_toolButton.clicked.connect(cfg.downProd.resetService)
cfg.ui.remember_user_checkBox.stateChanged.connect(cfg.downProd.rememberUserCheckboxSentinel2)
cfg.ui.sentinel2_alternative_search_checkBox.stateChanged.connect(cfg.downProd.alternativeCheckboxSentinel2)
cfg.ui.user_scihub_lineEdit.editingFinished.connect(cfg.downProd.rememberUserSentinel2)
cfg.ui.password_scihub_lineEdit.editingFinished.connect(cfg.downProd.rememberUserSentinel2)
cfg.ui.sentinel_service_lineEdit.editingFinished.connect(cfg.downProd.rememberService)
cfg.ui.check_toolButton_2.clicked.connect(cfg.downProd.checkAllBandsSentinel2)
cfg.ui.check_toolButton_3.clicked.connect(cfg.downProd.checkAllBandsSentinel3)
cfg.ui.check_toolButton_4.clicked.connect(cfg.downProd.checkAllBandsGOES)
cfg.ui.remember_user_checkBox_3.stateChanged.connect(cfg.downProd.rememberUserCheckboxEarthdata)
cfg.ui.user_usgs_lineEdit_2.editingFinished.connect(cfg.downProd.rememberUserEarthdata)
cfg.ui.password_usgs_lineEdit_2.editingFinished.connect(cfg.downProd.rememberUserEarthdata)
cfg.ui.download_images_tableWidget.itemSelectionChanged.connect(cfg.downProd.tableClick)
# connect to filter
cfg.ui.products_filter_lineEdit.textChanged.connect(cfg.downProd.filterTable)
''' Classification dock '''
# button band set
cfg.uidc.bandset_toolButton.clicked.connect(cfg.utls.bandSetTab)
cfg.uidc.band_processing_toolButton.clicked.connect(cfg.utls.bandProcessingTab)
cfg.uidc.preprocessing_toolButton_2.clicked.connect(cfg.utls.preProcessingTab)
cfg.uidc.postprocessing_toolButton_2.clicked.connect(cfg.utls.postProcessingTab)
cfg.uidc.bandcalc_toolButton_2.clicked.connect(cfg.utls.bandCalcTab)
cfg.uidc.download_images_toolButton_2.clicked.connect(cfg.utls.selectTabDownloadImages)
cfg.uidc.basic_tools_toolButton.clicked.connect(cfg.utls.basicToolsTab)
cfg.uidc.batch_toolButton.clicked.connect(cfg.utls.batchTab)
cfg.uidc.userguide_toolButton_2.clicked.connect(cfg.ipt.quickGuide)
cfg.uidc.help_toolButton_2.clicked.connect(cfg.ipt.askHelp)
cfg.uidc.support_toolButton.clicked.connect(cfg.ipt.supportSCP)
cfg.uidc.tabWidget_dock.currentChanged.connect(cfg.ipt.dockTabChanged)
# button new input
cfg.uidc.button_new_input.clicked.connect(cfg.SCPD.createInput)
# button reset
cfg.uidc.button_reset_input.clicked.connect(cfg.SCPD.resetInput)
# connect to save to shapefile
cfg.uidc.button_Save_ROI.clicked.connect(cfg.SCPD.saveROItoShapefile)
# connect to undo save ROI
cfg.uidc.undo_save_Button.clicked.connect(cfg.SCPD.undoSaveROI)
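# connect to redo save ROI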
cfg.uidc.redo_save_Button.clicked.connect(cfg.SCPD.redoSaveROI)
# connect the signature calculation checkBox
cfg.uidc.signature_checkBox.stateChanged.connect(cfg.SCPD.signatureCheckbox)
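# connect to add ROI to scatter plot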
cfg.uidc.scatterPlot_toolButton.clicked.connect(cfg.SCPD.addROIToScatterPlot)
# connect the save input checkBox
cfg.uidc.save_input_checkBox.stateChanged.connect(cfg.SCPD.saveInputCheckbox)
# connect to open training file
cfg.uidc.trainingFile_toolButton.clicked.connect(cfg.SCPD.openTrainingFile)
# connect to export signature list file
cfg.uidc.export_signature_list_toolButton.clicked.connect(cfg.utls.exportSignaturesTab)
# connect to import library file
cfg.uidc.import_library_toolButton.clicked.connect(cfg.utls.importSignaturesTab)
# add to spectral signature plot
cfg.uidc.signature_spectral_plot_toolButton.clicked.connect(cfg.SCPD.addSignatureToSpectralPlot)
# connect to filter
cfg.uidc.ROI_filter_lineEdit.textChanged.connect(cfg.SCPD.filterTree)
# connect to delete signature
cfg.uidc.delete_Signature_Button.clicked.connect(cfg.SCPD.removeSelectedSignatures)
# connect to merge signatures
cfg.uidc.merge_signature_toolButton.clicked.connect(cfg.SCPD.mergeSelectedSignatures)
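# connect to calculate signatures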
cfg.uidc.calculate_signature_toolButton.clicked.connect(cfg.SCPD.calculateSignatures)
# connect the ROI macroclass ID
cfg.uidc.ROI_Macroclass_ID_spin.valueChanged.connect(cfg.SCPD.setROIMacroID)
# connect the ROI Macroclass
cfg.uidc.ROI_Macroclass_line.editingFinished.connect(cfg.SCPD.roiMacroclassInfo)
# custom expression
cfg.uidc.custom_index_lineEdit.editingFinished.connect(cfg.SCPD.customExpressionEdited)
# connect the ROI Class ID
cfg.uidc.ROI_ID_spin.valueChanged.connect(cfg.SCPD.setROIID)
# connect the ROI Class
cfg.uidc.ROI_Class_line.editingFinished.connect(cfg.SCPD.roiClassInfo)
# connect the vegetation index display checkBox
cfg.uidc.display_cursor_checkBox.stateChanged.connect(cfg.SCPD.vegetationIndexCheckbox)
# connect the vegetation index combo
cfg.uidc.vegetation_index_comboBox.currentIndexChanged.connect(cfg.SCPD.vegetationIndexName)
# connect the rapid ROI checkBox
cfg.uidc.rapid_ROI_checkBox.stateChanged.connect(cfg.SCPD.rapidROICheckbox)
# connect the rapid ROI band spinBox
cfg.uidc.rapidROI_band_spinBox.valueChanged.connect(cfg.SCPD.rapidROIband)
''' Classification tab '''
# connect to algorithm weight button
cfg.ui.algorithm_weight_button.clicked.connect(cfg.utls.algorithmBandWeightTab)
# connect to threshold button
cfg.ui.algorithm_threshold_button.clicked.connect(cfg.utls.signatureThresholdTab)
# connect to LCS threshold button
cfg.ui.LC_signature_button.clicked.connect(cfg.utls.LCSThresholdTab)
# connect the algorithm combo
cfg.ui.algorithm_combo.currentIndexChanged.connect(cfg.classTab.algorithmName)
# connect the algorithm threshold
cfg.ui.alg_threshold_SpinBox.valueChanged.connect(cfg.classTab.algorithmThreshold)
# connect to run classification
cfg.ui.button_classification.clicked.connect(cfg.classTab.runClassificationAction)
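# connect to batch set function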
cfg.ui.classification.clicked.connect(cfg.batchT.setFunctionButton)
# connect the macroclass checkBox
cfg.ui.macroclass_checkBox.stateChanged.connect(cfg.classTab.macroclassCheckbox)
cfg.ui.class_checkBox.stateChanged.connect(cfg.classTab.classCheckbox)
# connect the LC signature checkBox
cfg.ui.LC_signature_checkBox.stateChanged.connect(cfg.classTab.LCSignature_Checkbox)
# connect the mask checkBox
cfg.ui.mask_checkBox.stateChanged.connect(cfg.classTab.maskCheckbox)
# connect to reset qml button
cfg.ui.resetQmlButton.clicked.connect(cfg.classTab.resetQmlStyle)
# connect to reset mask button
cfg.ui.resetMaskButton.clicked.connect(cfg.classTab.resetMask)
# connect to qml button
cfg.ui.qml_Button.clicked.connect(cfg.classTab.selectQmlStyle)
''' Spectral signature plot '''
# connect the sigma checkBox
cfg.uisp.sigma_checkBox.stateChanged.connect(cfg.spSigPlot.sigmaCheckbox)
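# connect the band lines and grid checkBoxes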
cfg.uisp.band_lines_checkBox.stateChanged.connect(cfg.spSigPlot.refreshPlot)
cfg.uisp.grid_checkBox.stateChanged.connect(cfg.spSigPlot.refreshPlot)
# connect to remove signature button
cfg.uisp.remove_Signature_Button.clicked.connect(cfg.spSigPlot.removeSignature)
# connect to calculate spectral distances button
cfg.uisp.calculate_spectral_distance_Button.clicked.connect(cfg.spSigPlot.calculateSpectralDistances)
# connect to fit to axes
cfg.uisp.fitToAxes_pushButton.clicked.connect(cfg.spSigPlot.fitPlotToAxes)
# connect to plot spinbox
cfg.uisp.plot_text_spinBox.valueChanged.connect(cfg.spSigPlot.setPlotLegendLenght)
# connect to value range
cfg.uisp.value_range_pushButton.clicked.connect(cfg.spSigPlot.editValueRange)
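# connect to set minimum maximum and automatic threshold buttons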
cfg.uisp.set_min_max_Button.clicked.connect(cfg.spSigPlot.setMinimumMaximum)
cfg.uisp.automatic_threshold_pushButton_2.clicked.connect(cfg.spSigPlot.setAllWeightsVariance)
# connect to activate pointer
cfg.uisp.LCS_pointerButton_2.clicked.connect(cfg.spSigPlot.pointerActive)
cfg.uisp.LCS_ROI_button_2.clicked.connect(cfg.spSigPlot.ROIThreshold)
# undo threshold
cfg.uisp.undo_threshold_Button.clicked.connect(cfg.spSigPlot.undoThreshold)
# connect the include signature checkBox
cfg.uisp.LCS_include_checkBox_2.stateChanged.connect(cfg.spSigPlot.includeCheckbox)
cfg.uisp.LCS_cut_checkBox_2.stateChanged.connect(cfg.spSigPlot.cutCheckbox)
# connect to add to signature list
cfg.uisp.add_signature_list_pushButton.clicked.connect(cfg.spSigPlot.addToSignatureList)
# connect to save plot
cfg.uisp.save_plot_pushButton.clicked.connect(cfg.spSigPlot.savePlot)
# connect to edited cell
cfg.uisp.signature_list_plot_tableWidget.cellChanged.connect(cfg.spSigPlot.editedCell)
cfg.uisp.signature_list_plot_tableWidget.horizontalHeader().sectionClicked.connect(cfg.spSigPlot.orderedTable)
# connect to signature plot list double click
cfg.uisp.signature_list_plot_tableWidget.doubleClicked.connect(cfg.spSigPlot.signatureListDoubleClick)
''' Scatter plot tab '''
# connect to scatter plot button
cfg.uiscp.scatter_ROI_Button.clicked.connect(cfg.scaPlT.scatterPlotCalc)
# connect to Band X spinbox
cfg.uiscp.bandX_spinBox.valueChanged.connect(cfg.scaPlT.bandXPlot)
# connect to Band Y spinbox
cfg.uiscp.bandY_spinBox.valueChanged.connect(cfg.scaPlT.bandYPlot)
# connect double click ROI list to zoom
cfg.uiscp.scatter_list_plot_tableWidget.doubleClicked.connect(cfg.scaPlT.scatterPlotDoubleClick)
# connect to edited cell
cfg.uiscp.scatter_list_plot_tableWidget.cellChanged.connect(cfg.scaPlT.editedCell)
# connect to remove signature button
cfg.uiscp.remove_Signature_Button.clicked.connect(cfg.scaPlT.removeScatter)
# connect to save plot
cfg.uiscp.save_plot_pushButton_2.clicked.connect(cfg.scaPlT.savePlot)
# connect to fit to axes
cfg.uiscp.fitToAxes_pushButton_2.clicked.connect(cfg.scaPlT.fitPlotToAxes)
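# connect to add temporary ROI, display and image to scatter plot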
cfg.uiscp.plot_temp_ROI_pushButton.clicked.connect(cfg.scaPlT.addTempROIToScatterPlot)
cfg.uiscp.plot_display_pushButton.clicked.connect(cfg.scaPlT.addDisplayToScatterPlot)
cfg.uiscp.plot_image_pushButton.clicked.connect(cfg.scaPlT.addImageToScatterPlot)
# connect to change color button
cfg.uiscp.polygon_color_Button.clicked.connect(cfg.scaPlT.changePolygonColor)
cfg.uiscp.plot_color_ROI_pushButton.clicked.connect(cfg.scaPlT.colorPlot)
# connect to select value range
cfg.uiscp.draw_polygons_pushButton.clicked.connect(cfg.scaPlT.selectRange)
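# connect to remove polygons, show polygon area and add to signature list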
cfg.uiscp.remove_polygons_pushButton.clicked.connect(cfg.scaPlT.removePolygons)
cfg.uiscp.show_polygon_area_pushButton.clicked.connect(cfg.scaPlT.showScatterPolygonArea)
cfg.uiscp.add_signature_list_pushButton.clicked.connect(cfg.scaPlT.addToSignatureList)
''' Band set tab '''
# connect to refresh button
cfg.ui.toolButton_reload_3.clicked.connect(cfg.bst.rasterBandName)
# button reload
cfg.ui.toolButton_reload.clicked.connect(cfg.ipt.checkRefreshRasterLayer)
# connect to add file button
cfg.ui.toolButton_input_raster.clicked.connect(cfg.bst.addFileToBandSetAction)
# connect to add raster band button
cfg.ui.add_raster_bands_Button.clicked.connect(cfg.bst.addBandToSet)
# connect to select all bands button
cfg.ui.select_all_bands_Button.clicked.connect(cfg.bst.selectAllBands)
# connect to clear band set button
cfg.ui.clear_bandset_toolButton.clicked.connect(cfg.bst.clearBandSetAction)
# connect to move up band button
cfg.ui.move_up_toolButton.clicked.connect(cfg.bst.moveUpBand)
# connect to move down band button
cfg.ui.move_down_toolButton.clicked.connect(cfg.bst.moveDownBand)
# connect to sort by name button
cfg.ui.sort_by_name_toolButton.clicked.connect(cfg.bst.sortBandName)
# connect to remove band button
cfg.ui.remove_toolButton.clicked.connect(cfg.bst.removeBand)
# connect to add band set button
cfg.ui.add_band_set_toolButton.clicked.connect(cfg.bst.addBandSetTabAction)
# connect to changed tab
cfg.ui.Band_set_tabWidget.currentChanged.connect(cfg.bst.tabBandSetChanged)
# connect close tab
cfg.ui.Band_set_tabWidget.tabCloseRequested.connect(cfg.bst.closeBandSetTab)
# combo layer
cfg.ui.image_raster_name_combo.currentIndexChanged.connect(cfg.bst.rasterLayerName)
# connect to import band set button
cfg.ui.import_bandset_toolButton.clicked.connect(cfg.bst.importBandSet)
# connect to export band set button
cfg.ui.export_bandset_toolButton.clicked.connect(cfg.bst.exportBandSet)
# connect to satellite wavelength combo
cfg.ui.wavelength_sat_combo.currentIndexChanged.connect(cfg.bst.satelliteWavelength)
# connect to unit combo
cfg.ui.unit_combo.currentIndexChanged.connect(cfg.bst.setBandUnit)
# connect to date edit
cfg.ui.bandset_dateEdit.dateChanged.connect(cfg.bst.setBandsetDate)
# connect to band set process button
cfg.ui.band_set_process_toolButton.clicked.connect(cfg.bst.performBandSetTools)
# connect to filter
cfg.ui.bands_filter_lineEdit.textChanged.connect(cfg.bst.filterTable)
''' Pre processing tab '''
''' Clip multiple rasters '''
# connect to clip button
cfg.ui.clip_Button.clicked.connect(cfg.clipMulti.clipRastersAction)
cfg.ui.clip_multiple_rasters.clicked.connect(cfg.batchT.setFunctionButton)
# connect to activate UL pointer
cfg.ui.selectUL_toolButton.clicked.connect(cfg.clipMulti.pointerActive)
# connect to refresh shape button
cfg.ui.toolButton_reload_8.clicked.connect(cfg.clipMulti.refreshShapeClip)
cfg.ui.show_area_radioButton_3.clicked.connect(cfg.clipMulti.showHideArea)
cfg.ui.shapefile_checkBox.stateChanged.connect(cfg.clipMulti.checkboxShapeChanged)
cfg.ui.temporary_ROI_checkBox.stateChanged.connect(cfg.clipMulti.checkboxTempROIChanged)
# connect the shapefile combo
cfg.ui.shapefile_comboBox.currentIndexChanged.connect(cfg.clipMulti.referenceLayerName)
''' Stack raster bands '''
# connect to stack button
cfg.ui.stack_Button.clicked.connect(cfg.stackRstr.stackAction)
cfg.ui.stack_raster_bands.clicked.connect(cfg.batchT.setFunctionButton)
''' Spectral change band sets '''
# connect to calculate button
cfg.ui.spectral_distance_bandsets_toolButton.clicked.connect(cfg.spclDstBS.calculateDistanceAction)
cfg.ui.spectral_distance.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.min_distance_radioButton_2.clicked.connect(cfg.spclDstBS.radioMinDistChanged)
cfg.ui.spectral_angle_map_radioButton_2.clicked.connect(cfg.spclDstBS.radioSAMChanged)
''' Mosaic band sets '''
# connect to mosaic button
cfg.ui.mosaic_bandsets_toolButton.clicked.connect(cfg.mosaicBS.mosaicAction)
cfg.ui.mosaic_bandsets.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.mosaic_band_sets_lineEdit.textChanged.connect(cfg.mosaicBS.textChanged)
''' Cloud masking '''
# connect to mask button
cfg.ui.cloud_mask_toolButton.clicked.connect(cfg.cloudMsk.maskAction)
cfg.ui.cloud_masking.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.cloud_mask_classes_lineEdit.textChanged.connect(cfg.cloudMsk.textChanged)
# connect to refresh button
cfg.ui.toolButton_reload_23.clicked.connect(cfg.utls.refreshClassificationLayer)
''' ASTER tab '''
# connect to input button
cfg.ui.toolButton_directoryInput_ASTER.clicked.connect(cfg.ASTERT.inputASTER)
cfg.ui.ASTER_tableWidget.cellChanged.connect(cfg.ASTERT.editedCell)
cfg.ui.earth_sun_dist_lineEdit_2.textChanged.connect(cfg.ASTERT.editedEarthSunDist)
cfg.ui.sun_elev_lineEdit_2.textChanged.connect(cfg.ASTERT.editedSunElevation)
cfg.ui.date_lineEdit_2.textChanged.connect(cfg.ASTERT.editedDate)
cfg.ui.pushButton_Conversion_3.clicked.connect(cfg.ASTERT.performASTERCorrection)
cfg.ui.aster_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.pushButton_remove_band_2.clicked.connect(cfg.ASTERT.removeHighlightedBand)
''' MODIS tab '''
# connect to input button
cfg.ui.toolButton_directoryInput_MODIS.clicked.connect(cfg.MODIST.inputMODIS)
cfg.ui.MODIS_tableWidget.cellChanged.connect(cfg.MODIST.editedCell)
cfg.ui.pushButton_Conversion_4.clicked.connect(cfg.MODIST.performMODISConversion)
cfg.ui.modis_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.pushButton_remove_band_3.clicked.connect(cfg.MODIST.removeHighlightedBand)
''' Landsat tab '''
# connect to input button
cfg.ui.toolButton_directoryInput.clicked.connect(cfg.landsatT.inputLandsat)
cfg.ui.toolButton_directoryInput_MTL.clicked.connect(cfg.landsatT.inputMTL)
cfg.ui.pushButton_Conversion.clicked.connect(cfg.landsatT.performLandsatCorrection)
cfg.ui.landsat_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.pushButton_remove_band.clicked.connect(cfg.landsatT.removeHighlightedBand)
cfg.ui.landsat_tableWidget.cellChanged.connect(cfg.landsatT.editedCell)
cfg.ui.earth_sun_dist_lineEdit.textChanged.connect(cfg.landsatT.editedEarthSunDist)
cfg.ui.sun_elev_lineEdit.textChanged.connect(cfg.landsatT.editedSunElevation)
cfg.ui.date_lineEdit.textChanged.connect(cfg.landsatT.editedDate)
cfg.ui.satellite_lineEdit.textChanged.connect(cfg.landsatT.editedSatellite)
''' Sentinel-1 tab '''
# connect to input button
cfg.ui.S1_toolButton_fileInput.clicked.connect(cfg.sentinel1T.inputSentinel)
cfg.ui.S1_toolButton_directoryInput_xml.clicked.connect(cfg.sentinel1T.inputXML)
cfg.ui.pushButton_Conversion_6.clicked.connect(cfg.sentinel1T.performSentinelConversion)
cfg.ui.sentinel1_conversion.clicked.connect(cfg.batchT.setFunctionButton)
''' Sentinel-2 tab '''
# connect to input button
cfg.ui.S2_toolButton_directoryInput.clicked.connect(cfg.sentinel2T.inputSentinel)
cfg.ui.pushButton_Conversion_2.clicked.connect(cfg.sentinel2T.performSentinelConversion)
cfg.ui.sentinel2_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.S2_satellite_lineEdit.textChanged.connect(cfg.sentinel2T.editedSatellite)
cfg.ui.S2_pushButton_remove_band.clicked.connect(cfg.sentinel2T.removeHighlightedBand)
cfg.ui.sentinel_2_tableWidget.cellChanged.connect(cfg.sentinel2T.editedCell)
cfg.ui.S2_toolButton_directoryInput_xml2.clicked.connect(cfg.sentinel2T.inputXML2)
''' Sentinel-3 tab '''
# connect to input button
cfg.ui.S3_toolButton_directoryInput.clicked.connect(cfg.sentinel3T.inputSentinel)
cfg.ui.pushButton_Conversion_5.clicked.connect(cfg.sentinel3T.performSentinelConversion)
cfg.ui.sentinel3_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.S3_pushButton_remove_band.clicked.connect(cfg.sentinel3T.removeHighlightedBand)
''' GOES tab '''
# connect to input button
cfg.ui.GOES_toolButton_directoryInput.clicked.connect(cfg.goesT.inputGOES)
cfg.ui.pushButton_Conversion_8.clicked.connect(cfg.goesT.performGOESConversion)
cfg.ui.goes_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.GOES_pushButton_remove_band.clicked.connect(cfg.goesT.removeHighlightedBand)
''' Classification neighbor tab'''
cfg.ui.class_neighbor_toolButton.clicked.connect(cfg.clssNghbr.classNeighborAction)
cfg.ui.neighbor_pixels.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.toolButton_input_matrix.clicked.connect(cfg.clssNghbr.inputMatrixFile)
''' Reproject raster bands tab '''
# connect to refresh button
cfg.ui.toolButton_reload_25.clicked.connect(cfg.rprjRstBndsT.refreshClassificationLayer)
cfg.ui.use_align_raster_checkBox.stateChanged.connect(cfg.rprjRstBndsT.checkboxAlignChanged)
cfg.ui.use_epsg_checkBox.stateChanged.connect(cfg.rprjRstBndsT.checkboxEPSGChanged)
# connect to reproject raster button
cfg.ui.reproject_Button.clicked.connect(cfg.rprjRstBndsT.reprojectRasterBands)
cfg.ui.reproject_raster_bands.clicked.connect(cfg.batchT.setFunctionButton)
''' Split tab '''
# connect the classification combo
cfg.ui.raster_name_combo.currentIndexChanged.connect(cfg.splitT.rasterLayerName)
# connect to refresh button
cfg.ui.toolButton_reload_9.clicked.connect(cfg.splitT.refreshClassificationLayer)
# connect to split raster button
cfg.ui.split_Button.clicked.connect(cfg.splitT.splitRaster)
cfg.ui.split_raster_bands.clicked.connect(cfg.batchT.setFunctionButton)
''' PCA tab '''
# connect to PCA button
cfg.ui.pca_Button.clicked.connect(cfg.pcaT.calculatePCAAction)
cfg.ui.pca.clicked.connect(cfg.batchT.setFunctionButton)
''' K-means tab '''
# connect to kmeans button
cfg.ui.kmeans_Button.clicked.connect(cfg.clusteringT.calculateClusteringAction)
cfg.ui.clustering.clicked.connect(cfg.batchT.setFunctionButton)
# connect the clustering radio buttons
cfg.ui.kmean_minmax_radioButton.clicked.connect(cfg.clusteringT.radiokmean_minmaxChanged)
cfg.ui.kmean_siglist_radioButton.clicked.connect(cfg.clusteringT.radiokmean_siglistChanged)
cfg.ui.kmean_randomsiglist_radioButton.clicked.connect(cfg.clusteringT.radiokmean_randomsiglistChanged)
cfg.ui.kmeans_radioButton.clicked.connect(cfg.clusteringT.radioKmeansChanged)
cfg.ui.isodata_radioButton.clicked.connect(cfg.clusteringT.radioIsodataChanged)
cfg.ui.min_distance_radioButton.clicked.connect(cfg.clusteringT.radioMinDistChanged)
cfg.ui.spectral_angle_map_radioButton.clicked.connect(cfg.clusteringT.radioSAMChanged)
''' Random forest tab '''
# connect to calculate button
cfg.ui.button_random_forest.clicked.connect(cfg.rndmFrst.performRandomForest)
cfg.ui.random_forest.clicked.connect(cfg.batchT.setFunctionButton)
# connect the macroclass checkBox
cfg.ui.macroclass_checkBox_rf.stateChanged.connect(cfg.rndmFrst.macroclassCheckbox)
cfg.ui.class_checkBox_rf.stateChanged.connect(cfg.rndmFrst.classCheckbox)
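# connect to select classifier button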
cfg.ui.classifier_Button.clicked.connect(cfg.rndmFrst.selectRFClassifier)
# connect to reset classifier
cfg.ui.resetClassifierButton.clicked.connect(cfg.rndmFrst.resetRFClassifier)
''' Vector to Raster tab '''
cfg.ui.toolButton_reload_16.clicked.connect(cfg.vctRstrT.reloadVectorList)
cfg.ui.toolButton_reload_17.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.convert_vector_toolButton.clicked.connect(cfg.vctRstrT.convertToRasterAction)
cfg.ui.vector_to_raster.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.vector_name_combo.currentIndexChanged.connect(cfg.utls.refreshVectorFields)
cfg.ui.field_checkBox.stateChanged.connect(cfg.vctRstrT.checkboxFieldChanged)
cfg.ui.constant_value_checkBox.stateChanged.connect(cfg.vctRstrT.checkboxConstantValueChanged)
''' Post processing tab '''
''' accuracy tab '''
# connect the classification combo
cfg.ui.classification_name_combo.currentIndexChanged.connect(cfg.acc.classificationLayerName)
# connect to refresh button
cfg.ui.toolButton_reload_4.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect the reference combo
cfg.ui.reference_name_combo.currentIndexChanged.connect(cfg.acc.referenceLayerName)
# connect to refresh button
cfg.ui.buttonReload_shape_4.clicked.connect(cfg.acc.refreshReferenceLayer)
# connect to calculate error matrix button
cfg.ui.calculateMatrix_toolButton.clicked.connect(cfg.acc.calculateErrorMatrix)
cfg.ui.accuracy.clicked.connect(cfg.batchT.setFunctionButton)
''' Land cover change '''
# connect to refresh button reference classification
cfg.ui.toolButton_reload_5.clicked.connect(cfg.landCC.refreshClassificationReferenceLayer)
# connect to refresh button new classification
cfg.ui.toolButton_reload_6.clicked.connect(cfg.landCC.refreshNewClassificationLayer)
# connect the classification reference combo
cfg.ui.classification_reference_name_combo.currentIndexChanged.connect(cfg.landCC.classificationReferenceLayerName)
# connect the new classification combo
cfg.ui.new_classification_name_combo.currentIndexChanged.connect(cfg.landCC.newClassificationLayerName)
# connect the mask unchanged checkBox
cfg.ui.mask_unchanged_checkBox.stateChanged.connect(cfg.landCC.maskUnchangedCheckbox)
# connect to calculate land cover change button
cfg.ui.calculateLandCoverChange_toolButton.clicked.connect(cfg.landCC.landCoverChangeAction)
cfg.ui.land_cover_change.clicked.connect(cfg.batchT.setFunctionButton)
''' Classification report '''
# connect to refresh button
cfg.ui.toolButton_reload_10.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect to calculate button
cfg.ui.calculateReport_toolButton.clicked.connect(cfg.classRep.calculateClassReport)
cfg.ui.classification_report.clicked.connect(cfg.batchT.setFunctionButton)
''' Band set combination tab '''
# connect to calculate button
cfg.ui.calculateBandSetComb_toolButton.clicked.connect(cfg.bsComb.calculateBandSetCombination)
cfg.ui.band_combination.clicked.connect(cfg.batchT.setFunctionButton)
''' Cross classification tab '''
# connect the classification combo
cfg.ui.classification_name_combo_2.currentIndexChanged.connect(cfg.crossC.classificationLayerName)
# connect to refresh button
cfg.ui.toolButton_reload_21.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect the reference combo
cfg.ui.reference_name_combo_2.currentIndexChanged.connect(cfg.crossC.referenceLayerName)
# connect to refresh button
cfg.ui.buttonReload_shape_5.clicked.connect(cfg.crossC.refreshReferenceLayer)
# connect to calculate error matrix button
cfg.ui.calculatecrossClass_toolButton.clicked.connect(cfg.crossC.calculateCrossClassification)
cfg.ui.cross_classification.clicked.connect(cfg.batchT.setFunctionButton)
''' Class signature '''
# connect to calculate signature
cfg.ui.class_signature_Button.clicked.connect(cfg.classSigT.calculateClassSignatureAction)
cfg.ui.class_signature.clicked.connect(cfg.batchT.setFunctionButton)
# connect to refresh button
cfg.ui.toolButton_reload_22.clicked.connect(cfg.utls.refreshClassificationLayer)
''' Classification to vector '''
# connect to refresh button
cfg.ui.toolButton_reload_12.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect to convert button
cfg.ui.convert_toolButton.clicked.connect(cfg.classVect.convertClassificationToVectorAction)
cfg.ui.classification_to_vector.clicked.connect(cfg.batchT.setFunctionButton)
''' Reclassification '''
# connect to refresh button
cfg.ui.toolButton_reload_11.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect to reclassify button
cfg.ui.reclassify_toolButton.clicked.connect(cfg.reclassification.reclassifyAction)
cfg.ui.reclassification.clicked.connect(cfg.batchT.setFunctionButton)
# connect to calculate unique values button
cfg.ui.calculate_unique_values_toolButton.clicked.connect(cfg.reclassification.calculateUniqueValues)
# connect to incremental new values button
cfg.ui.incremental_new_values_toolButton.clicked.connect(cfg.reclassification.incrementalNewValues)
# connect to add value button
cfg.ui.add_value_pushButton.clicked.connect(cfg.reclassification.addRowToTable)
# connect to remove point
cfg.ui.remove_row_pushButton.clicked.connect(cfg.reclassification.removePointFromTable)
# connect to import band set button
cfg.ui.import_reclass_toolButton.clicked.connect(cfg.reclassification.importReclass)
# connect to export band set button
cfg.ui.export_reclass_toolButton.clicked.connect(cfg.reclassification.exportReclass)
# connect to edited cell
cfg.ui.reclass_values_tableWidget.cellChanged.connect(cfg.reclassification.editedCell)
''' Edit Raster tab'''
# connect to set value
cfg.ui.raster_set_value_toolButton.clicked.connect(cfg.editRstr.setRasterValueAction)
cfg.ui.edit_raster_using_vector.clicked.connect(cfg.batchT.setFunctionButton)
# connect to refresh rasters button
cfg.ui.toolButton_reload_14.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.undo_edit_Button.clicked.connect(cfg.editRstr.undoEdit)
# connect the expression text
cfg.ui.expression_lineEdit.textChanged.connect(cfg.editRstr.textChanged)
cfg.ui.use_constant_val_checkBox.stateChanged.connect(cfg.editRstr.checkboxConstantValChanged)
cfg.ui.use_field_vector_checkBox.stateChanged.connect(cfg.editRstr.checkboxVectorFieldChanged)
cfg.ui.use_expression_checkBox.stateChanged.connect(cfg.editRstr.checkboxUseExpressionChanged)
cfg.ui.edit_val_use_ROI_radioButton.clicked.connect(cfg.editRstr.radioUseROIPolygonChanged)
cfg.ui.edit_val_use_vector_radioButton.clicked.connect(cfg.editRstr.radioUseVectorChanged)
cfg.ui.toolButton_reload_20.clicked.connect(cfg.editRstr.reloadVectorList)
cfg.ui.vector_name_combo_2.currentIndexChanged.connect(cfg.utls.refreshVectorFields2)
''' Classification sieve tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_15.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.sieve_toolButton.clicked.connect(cfg.sieveRstr.sieveClassificationAction)
cfg.ui.classification_sieve.clicked.connect(cfg.batchT.setFunctionButton)
''' Classification erosion tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_18.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.class_erosion_toolButton.clicked.connect(cfg.ersnRstr.erosionClassificationAction)
cfg.ui.classification_erosion.clicked.connect(cfg.batchT.setFunctionButton)
# connect the value text
cfg.ui.erosion_classes_lineEdit.textChanged.connect(cfg.ersnRstr.textChanged)
''' Classification dilation tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_19.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.class_dilation_toolButton.clicked.connect(cfg.dltnRstr.dilationClassificationAction)
cfg.ui.classification_dilation.clicked.connect(cfg.batchT.setFunctionButton)
# connect the value text
cfg.ui.dilation_classes_lineEdit.textChanged.connect(cfg.dltnRstr.textChanged)
''' Classification zonal stat tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_24.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.buttonReload_shape_6.clicked.connect(cfg.znlSttRstT.refreshReferenceLayer)
cfg.ui.zonal_stat_raster_toolButton.clicked.connect(cfg.znlSttRstT.zonalStatRasterAction)
cfg.ui.zonal_stat_raster.clicked.connect(cfg.batchT.setFunctionButton)
# connect the classification combo
cfg.ui.classification_name_combo_5.currentIndexChanged.connect(cfg.znlSttRstT.classificationLayerName)
# connect the reference combo
cfg.ui.reference_name_combo_3.currentIndexChanged.connect(cfg.znlSttRstT.referenceLayerName)
''' Band Calc tab '''
# connect to refresh button
cfg.ui.toolButton_reload_13.clicked.connect(cfg.bCalc.rasterBandName)
# connect to calc button
cfg.ui.toolButton_calculate.clicked.connect(cfg.bCalc.calculateButton)
cfg.ui.band_calc.clicked.connect(cfg.batchT.setFunctionButton)
# connect to import expression button
cfg.ui.toolButton_import_expression.clicked.connect(cfg.bCalc.importExpressionList)
# connect the expression text
cfg.ui.plainTextEdit_calc.textChanged.connect(cfg.bCalc.textChanged)
# connect double click table
cfg.ui.tableWidget_band_calc.doubleClicked.connect(cfg.bCalc.doubleClick)
# connect the intersection checkBox
cfg.ui.intersection_checkBox.stateChanged.connect(cfg.bCalc.intersectionCheckbox)
# connect the extent checkBox
cfg.ui.extent_checkBox.stateChanged.connect(cfg.bCalc.extentCheckbox)
# connect to raster type combo
cfg.ui.raster_type_combo.currentIndexChanged.connect(cfg.bCalc.setRasterType)
# connect to expression buttons
cfg.ui.toolButton_plus.clicked.connect(cfg.bCalc.buttonPlus)
cfg.ui.toolButton_minus.clicked.connect(cfg.bCalc.buttonMinus)
cfg.ui.toolButton_product.clicked.connect(cfg.bCalc.buttonProduct)
cfg.ui.toolButton_ratio.clicked.connect(cfg.bCalc.buttonRatio)
cfg.ui.toolButton_power.clicked.connect(cfg.bCalc.buttonPower)
cfg.ui.toolButton_sqrt.clicked.connect(cfg.bCalc.buttonSQRT)
cfg.ui.toolButton_lbracket.clicked.connect(cfg.bCalc.buttonLbracket)
cfg.ui.toolButton_rbracket.clicked.connect(cfg.bCalc.buttonRbracket)
cfg.ui.toolButton_greater.clicked.connect(cfg.bCalc.buttonGreater)
cfg.ui.toolButton_less.clicked.connect(cfg.bCalc.buttonLower)
cfg.ui.toolButton_equal.clicked.connect(cfg.bCalc.buttonEqual)
cfg.ui.toolButton_unequal.clicked.connect(cfg.bCalc.buttonUnequal)
cfg.ui.band_calc_function_tableWidget.doubleClicked.connect(cfg.bCalc.setFunction)
# decision rules
cfg.ui.decision_rules_tableWidget.cellChanged.connect(cfg.bCalc.editedDecisionRulesTable)
cfg.ui.band_calc_tabWidget.currentChanged.connect(cfg.bCalc.tabChanged)
# connect to add rule
cfg.ui.add_rule_toolButton.clicked.connect(cfg.bCalc.addRowToTable)
cfg.ui.remove_rule_toolButton.clicked.connect(cfg.bCalc.removeHighlightedRule)
# connect to clear button
cfg.ui.clear_rules_toolButton.clicked.connect(cfg.bCalc.clearRulesAction)
cfg.ui.export_rules_toolButton.clicked.connect(cfg.bCalc.exportRules)
cfg.ui.import_rules_toolButton.clicked.connect(cfg.bCalc.importRules)
cfg.ui.move_up_toolButton_2.clicked.connect(cfg.bCalc.moveUpRule)
cfg.ui.move_down_toolButton_2.clicked.connect(cfg.bCalc.moveDownRule)
# connect to filter
cfg.ui.bandcalc_filter_lineEdit.textChanged.connect(cfg.bCalc.filterTable)
''' Batch tab '''
# connect the batch text
#cfg.ui.plainTextEdit_batch.textChanged.connect(cfg.batchT.textChanged)
# connect to calc button
cfg.ui.toolButton_run_batch.clicked.connect(cfg.batchT.runButton)
cfg.ui.check_batch.clicked.connect(cfg.batchT.textChanged)
cfg.ui.clear_batch_toolButton.clicked.connect(cfg.batchT.clearBatch)
cfg.ui.export_batch_toolButton.clicked.connect(cfg.batchT.exportBatch)
cfg.ui.import_batch_toolButton.clicked.connect(cfg.batchT.importBatch)
# connect to table double click
cfg.ui.batch_tableWidget.doubleClicked.connect(cfg.batchT.setFunction)
''' Settings tab '''
# connect the ID field name line
cfg.ui.ID_field_name_lineEdit.textChanged.connect(cfg.sets.IDFieldNameChange)
# connect the macroclass ID field name line
cfg.ui.MID_field_name_lineEdit.textChanged.connect(cfg.sets.MacroIDFieldNameChange)
# connect the macroclass Info field name line
cfg.ui.MCInfo_field_name_lineEdit.textChanged.connect(cfg.sets.MacroInfoFieldNameChange)
# connect the Info field name line
cfg.ui.Info_field_name_lineEdit.textChanged.connect(cfg.sets.InfoFieldNameChange)
# connect the variable name line
cfg.ui.variable_name_lineEdit.textChanged.connect(cfg.sets.VariableNameChange)
# connect the group name line
cfg.ui.group_name_lineEdit.textChanged.connect(cfg.sets.GroupNameChange)
# connect the SMTP line
cfg.ui.smtp_server_lineEdit.textChanged.connect(cfg.sets.SMTPServerChange)
# connect the SMTP to emails line
cfg.ui.to_email_lineEdit.textChanged.connect(cfg.sets.SMTPtoEmailsChange)
# connect the SMTP user
cfg.ui.smtp_user_lineEdit.editingFinished.connect(cfg.sets.rememberUser)
# connect the SMTP password
cfg.ui.smtp_password_lineEdit.editingFinished.connect(cfg.sets.rememberUser)
# connect the remember settings checkBox
cfg.ui.remeber_settings_checkBox.stateChanged.connect(cfg.sets.rememberUserCheckbox)
# connect the SMTP checkBox
cfg.ui.smtp_checkBox.stateChanged.connect(cfg.sets.SMTPCheckbox)
# connect to reset field names button
cfg.ui.reset_field_names_Button.clicked.connect(cfg.sets.resetFieldNames)
# connect to reset variable name button
cfg.ui.reset_variable_name_Button.clicked.connect(cfg.sets.resetVariableName)
# connect to reset group name button
cfg.ui.reset_group_name_Button.clicked.connect(cfg.sets.resetGroupName)
# connect the log file checkBox
cfg.ui.log_checkBox.stateChanged.connect(cfg.sets.logCheckbox)
# connect the download news checkBox
cfg.ui.download_news_checkBox.stateChanged.connect(cfg.sets.downloadNewsCheckbox)
# connect the virtual raster checkBox
cfg.ui.virtual_raster_load_checkBox.stateChanged.connect(cfg.sets.virtualRasterCheckbox)
# connect the sound checkBox
cfg.ui.sound_checkBox.stateChanged.connect(cfg.sets.soundCheckbox)
# connect the virtual raster format checkBox
cfg.ui.virtual_raster_checkBox.stateChanged.connect(cfg.sets.virtualRasterFormatCheckbox)
# connect the raster compression checkBox
cfg.ui.raster_compression_checkBox.stateChanged.connect(cfg.sets.rasterCompressionCheckbox)
# connect the parallel writing checkBox
cfg.ui.parallel_writing_checkBox.stateChanged.connect(cfg.sets.parallelWritingCheckbox)
# connect to change temporary directory button
cfg.ui.temp_directory_Button.clicked.connect(cfg.sets.changeTempDir)
# connect to reset temporary directory button
cfg.ui.reset_temp_directory_Button.clicked.connect(cfg.sets.resetTempDir)
# connect to clear log button
cfg.ui.clearLog_Button.clicked.connect(cfg.utls.clearLogFile)
# connect to export log button
cfg.ui.exportLog_Button.clicked.connect(cfg.sets.copyLogFile)
# connect to test dependencies button
cfg.ui.test_dependencies_Button.clicked.connect(cfg.sets.testDependencies)
# connect to RAM spinbox
cfg.ui.RAM_spinBox.valueChanged.connect(cfg.sets.RAMSettingChange)
# connect to thread spinbox
cfg.ui.CPU_spinBox.valueChanged.connect(cfg.sets.threadSettingChange)
# connect the Python path line
cfg.ui.python_path_lineEdit.textChanged.connect(cfg.sets.PythonPathSettingChange)
# connect the Python modules path line
cfg.ui.python_path_lineEdit_2.textChanged.connect(cfg.sets.PythonModulePathSettingChange)
# connect the GDAL path line
cfg.ui.gdal_path_lineEdit.textChanged.connect(cfg.sets.GDALPathSettingChange)
# connect to change color button
cfg.ui.change_color_Button.clicked.connect(cfg.sets.changeROIColor)
# connect to reset color button
cfg.ui.reset_color_Button.clicked.connect(cfg.sets.resetROIStyle)
# connect to transparency slider
cfg.ui.transparency_Slider.valueChanged.connect(cfg.sets.changeROITransparency)
# first install
if cfg.firstInstallVal == 'Yes':
cfg.utls.welcomeTab()
cfg.utls.setQGISRegSetting(cfg.regFirstInstall, 'No')
cfg.utls.findAvailableRAM()
cfg.utls.findAvailableProcessors()
# welcome message
lWelcome = cfg.plgnDir + '/ui/welcome.html'
htmlTextF = open(lWelcome, 'r')
htmlText = htmlTextF.read()
cfg.uidc.main_textBrowser.clear()
cfg.uidc.main_textBrowser.setHtml(htmlText)
htmlTextF.close()
if cfg.osSCP.path.isfile(cfg.plgnDir + '/firstrun'):
cfg.ipt.welcomeText('https://semiautomaticgit.github.io/SemiAutomaticClassificationPluginWelcome/changelog.html')
cfg.osSCP.remove(cfg.plgnDir + '/firstrun')
else:
dateV = cfg.datetimeSCP.datetime.now()
dStr = dateV.strftime('%Y_%m_%d')
cfg.ipt.welcomeText('https://semiautomaticgit.github.io/SemiAutomaticClassificationPluginWelcome/welcome' + '_' + dStr + '.html', 'https://semiautomaticgit.github.io/SemiAutomaticClassificationPluginWelcome/welcome.html')
cfg.utls.cleanOldTempDirectory()
cfg.skipRegistry = False
else:
dockclassdlg = DockClassDialog(qgisUtils.iface.mainWindow(), qgisUtils.iface)
qgisUtils.iface.removeDockWidget(dockclassdlg)
# save signature list when saving project
def projectSaved(self):
if cfg.skipProjectSaved == 'No':
if len(cfg.signIDs) > 0:
cfg.SCPD.saveSignatureListToFile()
if cfg.scpFlPath is not None:
cfg.SCPD.saveMemToSHP(cfg.shpLay)
cfg.utls.zipDirectoryInFile(cfg.scpFlPath, cfg.inptDir)
cfg.downProd.saveDownloadTable()
try:
scpPath = cfg.utls.readProjectVariable('trainingLayer', '')
name = cfg.utls.fileNameNoExt(scpPath)
duplicateID = cfg.utls.layerID(name, cfg.shpLay.id())
cfg.qgisCoreSCP.QgsProject.instance().removeMapLayer(duplicateID)
except:
pass
# reset all variables and interface
def resetSCP(self):
# logger
cfg.utls.logToFile(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), 'LOG ACTIVE' + cfg.sysSCPInfo)
cfg.scpFlPath = None
cfg.ui.image_raster_name_combo.blockSignals(True)
cfg.ui.Band_set_tabWidget.blockSignals(True)
cfg.rasterComboEdited = 'No'
cfg.projPath = cfg.qgisCoreSCP.QgsProject.instance().fileName()
cfg.lastSaveDir = cfg.osSCP.path.dirname(cfg.projPath)
cfg.signList = {}
cfg.signIDs = {}
cfg.spectrPlotList = {}
cfg.signPlotIDs = {}
cfg.scatterPlotIDs = {}
cfg.scatterPlotList = {}
cfg.undoIDList = {}
cfg.undoSpectrPlotList = {}
cfg.lstROI = None
cfg.lstROI2 = None
cfg.rpdROICheck = '2'
cfg.vegIndexCheck = 2
cfg.sigClcCheck = 2
cfg.utls.clearTable(cfg.uisp.signature_list_plot_tableWidget)
cfg.utls.clearTable(cfg.uiscp.scatter_list_plot_tableWidget)
cfg.utls.clearTable(cfg.ui.signature_threshold_tableWidget)
cfg.utls.clearTable(cfg.ui.download_images_tableWidget)
cfg.utls.clearTable(cfg.ui.LCS_tableWidget)
cfg.treeDockItm = {}
cfg.treeDockMCItm = {}
cfg.SCPD.clearTree()
cfg.scaPlT.scatterPlotListTable(cfg.uiscp.scatter_list_plot_tableWidget)
cfg.spSigPlot.refreshPlot()
cfg.LCSignT.LCSignatureThresholdListTable()
# reload layers in combos
cfg.ipt.refreshRasterLayer()
cfg.utls.refreshVectorLayer()
cfg.utls.refreshClassificationLayer()
cfg.utls.refreshRasterExtent()
cfg.acc.refreshReferenceLayer()
cfg.crossC.refreshReferenceLayer()
cfg.znlSttRstT.refreshReferenceLayer()
cfg.znlSttRstT.loadStatisticCombo()
cfg.clssNghbr.loadStatisticCombo()
cfg.landCC.refreshClassificationReferenceLayer()
cfg.landCC.refreshNewClassificationLayer()
# read variables
cfg.utls.readVariables()
# set ROI color
cfg.ui.change_color_Button.setStyleSheet('background-color :' + cfg.ROIClrVal)
# set ROI transparency
cfg.ui.transparency_Slider.setValue(cfg.ROITrnspVal)
# set RAM value
cfg.ui.RAM_spinBox.setValue(cfg.RAMValue)
# set CPU value
cfg.ui.CPU_spinBox.setValue(cfg.threads)
# rapid ROI band
cfg.uidc.rapidROI_band_spinBox.setValue(int(cfg.ROIband))
# min ROI size
cfg.Min_region_size_spin.setValue(int(cfg.minROISz))
# max ROI width
cfg.Max_ROI_width_spin.setValue(int(cfg.maxROIWdth))
# range radius
cfg.Range_radius_spin.setValue(float(cfg.rngRad))
# ROI ID field
cfg.uidc.ROI_ID_spin.setValue(int(cfg.ROIID))
# ROI macro ID field
cfg.uidc.ROI_Macroclass_ID_spin.setValue(int(cfg.ROIMacroID))
# preview size
cfg.preview_size_spinBox.setValue(float(cfg.prvwSz))
# set ID field name line
cfg.ui.ID_field_name_lineEdit.setText(cfg.fldID_class)
cfg.ui.MID_field_name_lineEdit.setText(cfg.fldMacroID_class)
# set Info field name line
cfg.ui.Info_field_name_lineEdit.setText(cfg.fldROI_info)
cfg.ui.MCInfo_field_name_lineEdit.setText(cfg.fldROIMC_info)
cfg.ui.variable_name_lineEdit.setText(cfg.variableName)
cfg.ui.group_name_lineEdit.setText(cfg.grpNm)
# gdal path
cfg.ui.gdal_path_lineEdit.setText(cfg.gdalPath)
cfg.ui.python_path_lineEdit.setText(cfg.PythonPathSettings)
cfg.ui.python_path_lineEdit_2.setText(cfg.PythonModulesPathSettings)
# set rapid ROI checkbox state
try:
cfg.uidc.rapid_ROI_checkBox.setCheckState(int(cfg.rpdROICheck))
except:
pass
# set vegetation index calculation checkbox state
try:
cfg.uidc.display_cursor_checkBox.setCheckState(int(cfg.vegIndexCheck))
except:
pass
# set signature calculation checkbox state
try:
cfg.uidc.signature_checkBox.setCheckState(int(cfg.sigClcCheck))
cfg.ui.signature_checkBox2.setCheckState(int(cfg.sigClcCheck))
except:
pass
# set save input checkbox state
try:
cfg.uidc.save_input_checkBox.setCheckState(int(cfg.saveInputCheck))
except:
pass
# load classification algorithm
idAlg = cfg.ui.algorithm_combo.findText(cfg.algName)
if idAlg >= 0:
cfg.ui.algorithm_combo.setCurrentIndex(idAlg)
else:
cfg.ui.algorithm_combo.setCurrentIndex(0)
cfg.algName = cfg.algMinDist
# ROI info
cfg.uidc.ROI_Class_line.setText(cfg.ROIInfo)
cfg.uidc.ROI_Macroclass_line.setText(cfg.ROIMacroClassInfo)
cfg.uidc.custom_index_lineEdit.setText(cfg.customExpression)
# RGB list
cfg.RGBLT.RGBListTable(cfg.RGBList)
# reload raster bands in checklist
cfg.bst.rasterBandName()
cfg.rasterComboEdited = 'Yes'
cfg.ui.image_raster_name_combo.blockSignals(False)
cfg.ui.Band_set_tabWidget.blockSignals(False)
# new project
def newProjectLoaded(self):
# clear band set
t = cfg.ui.Band_set_tabWidget.count()
for index in reversed(list(range(0, t))):
cfg.bst.deleteBandSetTab(index)
self.resetSCP()
cfg.bCalc.rasterBandName()
cfg.SCPD.openInput()
cfg.bstLT.BandSetListTable()
# read project variables
def projectLoaded(self):
self.resetSCP()
# load product download table
cfg.downProd.openDownloadTable()
cfg.bCalc.rasterBandName()
cfg.SCPD.openInput()
cfg.bstLT.BandSetListTable()
# run
def run(self):
# show the dialog
cfg.dlg.show()
# Run the dialog event loop
pointer_result = cfg.dlg.exec_()
# remove plugin menu and icon
def unload(self):
cfg.utls.createBackupFile(cfg.scpFlPath)
# save window size
try:
cfg.utls.setQGISRegSetting(cfg.regWindowSizeW, cfg.dlg.size().width())
cfg.utls.setQGISRegSetting(cfg.regWindowSizeH, cfg.dlg.size().height())
except:
pass
try:
qgisUtils.iface.removeDockWidget(cfg.dockclassdlg)
del cfg.toolBar2
del cfg.toolBar3
cfg.menu.deleteLater()
# remove temp files
if cfg.tmpDir is not None and cfg.QDirSCP(cfg.tmpDir).exists():
cfg.shutilSCP.rmtree(cfg.tmpDir, True)
oDir = cfg.utls.makeDirectory(str(cfg.QDirSCP.tempPath() + '/' + cfg.tempDirName))
except:
if PluginCheck == 'Yes':
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please, restart QGIS for executing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
| gpl-3.0 |
ran5515/DeepDecision | tensorflow/examples/learn/text_classification_cnn.py | 29 | 5677 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
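# Shape walk-through (illustrative sketch; ``B`` is a symbolic batch size,
# not a name used in this example). With the constants defined above, a
# [B, MAX_DOCUMENT_LENGTH] batch of word ids flows through the model as:
#   embed_sequence + expand_dims              -> [B, 100, 20, 1]
#   conv1: 10 filters, kernel [20, 20], VALID -> [B, 81, 1, 10]
#   max_pooling2d: pool 4, stride 2, SAME     -> [B, 41, 1, 10]
#   transpose([0, 1, 3, 2])                   -> [B, 41, 10, 1]
#   conv2: 10 filters, kernel [20, 10], VALID -> [B, 22, 1, 10]
#   reduce_max over axis 1 + squeeze          -> [B, 10]
#   dense                                     -> [B, MAX_LABEL] = [B, 15] logits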
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
drammock/mne-python | tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py | 10 | 5666 | """
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- compute single trial power estimates
- baseline-correct the power estimates (power ratios)
- compute stats to see if ratio deviates from 1.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax, event_id = -0.3, 0.6, 1
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# just use right temporal sensors for speed
epochs.pick_channels(mne.read_vectorview_selection('Right-temporal'))
evoked = epochs.average()
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
freqs = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
tfr_epochs = tfr_morlet(epochs, freqs, n_cycles=4., decim=decim,
average=False, return_itc=False, n_jobs=1)
# Baseline power
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))
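# Note: with mode='logratio' each time-frequency point is divided by the
# mean power in the (-100 ms, 0) baseline window and log10-transformed,
# so values deviating from 0 indicate a change relative to baseline.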
# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(-0.1, 0.4)
tfr_epochs.crop(-0.1, 0.4)
epochs_power = tfr_epochs.data
###############################################################################
# Define adjacency for statistics
# -------------------------------
# To compute a cluster-corrected p-value, we need a suitable definition
# of the adjacency (neighborhood) of our values. So we first compute the
# sensor adjacency, then combine that with a grid/lattice adjacency
# assumption for the time-frequency plane:
sensor_adjacency, ch_names = mne.channels.find_ch_adjacency(
tfr_epochs.info, 'grad')
# Subselect the channels we are actually using
use_idx = [ch_names.index(ch_name.replace(' ', ''))
for ch_name in tfr_epochs.ch_names]
sensor_adjacency = sensor_adjacency[use_idx][:, use_idx]
assert sensor_adjacency.shape == \
(len(tfr_epochs.ch_names), len(tfr_epochs.ch_names))
assert epochs_power.data.shape == (
len(epochs), len(tfr_epochs.ch_names),
len(tfr_epochs.freqs), len(tfr_epochs.times))
adjacency = mne.stats.combine_adjacency(
sensor_adjacency, len(tfr_epochs.freqs), len(tfr_epochs.times))
# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
len(tfr_epochs.ch_names) * len(tfr_epochs.freqs) * len(tfr_epochs.times)
###############################################################################
# Compute statistic
# -----------------
threshold = 3.
n_permutations = 50 # Warning: 50 is way too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
threshold=threshold, tail=0,
adjacency=adjacency,
out_type='mask', verbose=True)
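# For orientation: ``T_obs`` has one t-value per (channel, frequency, time)
# point, i.e. shape ``epochs_power.shape[1:]``; ``clusters`` is a list of
# boolean masks of that same shape because ``out_type='mask'``; and
# ``cluster_p_values`` holds one permutation-based p-value per cluster.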
###############################################################################
# View time-frequency plots
# -------------------------
evoked_data = evoked.data
times = 1e3 * evoked.times
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
# Just plot one channel's data
ch_idx, f_idx, t_idx = np.unravel_index(
np.nanargmax(np.abs(T_obs_plot)), epochs_power.shape[1:])
# ch_idx = tfr_epochs.ch_names.index('MEG 1332') # to show a specific one
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.subplot(2, 1, 1)
plt.imshow(T_obs[ch_idx], cmap=plt.cm.gray,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot[ch_idx], cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(f'Induced power ({tfr_epochs.ch_names[ch_idx]})')
ax2 = plt.subplot(2, 1, 2)
evoked.plot(axes=[ax2], time_unit='s')
plt.show()
| bsd-3-clause |
nikhilgahlawat/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
pdamodaran/yellowbrick | tests/rand.py | 1 | 3176 | # tests.random
# A visualizer that draws a random scatter plot for testing.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Wed Mar 21 17:51:15 2018 -0400
#
# ID: random.py [] benjamin@bengfort.com $
"""
A visualizer that draws a random scatter plot for testing.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from yellowbrick.base import Visualizer
from yellowbrick.style import resolve_colors
from sklearn.datasets import make_blobs
##########################################################################
## Random Visualizer
##########################################################################
class RandomVisualizer(Visualizer):
"""
Creates random scatter plots as a testing utility.
Data generation uses scikit-learn make_blobs to create scatter plots that
have reasonable visual features and multiple colors.
Parameters
----------
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
n_samples : int, default: 100
The number of points to generate for the scatter plot
n_blobs : int or array of shape [n_centers, 2]
Define the number of blobs to create or specify their centers.
random_state : int, RandomState or None:
Used to specify the seed of the random state to ensure tests work.
"""
def __init__(self, ax=None, n_samples=100, n_blobs=3,
random_state=None, **kwargs):
super(RandomVisualizer, self).__init__(ax=ax, **kwargs)
if isinstance(random_state, (int, float)) or random_state is None:
random_state = np.random.RandomState(random_state)
self.set_params(
n_samples=n_samples, n_blobs=n_blobs, random_state=random_state,
)
def generate(self):
"""
Returns random data according to the visualizer specification.
Returns
-------
X : array of shape [n_samples, 2]
2 dimensional array of points to plot
y : vector with length n_samples
Center/blob each point belongs to (used for color)
"""
return make_blobs(
self.n_samples, 2, self.n_blobs, random_state=self.random_state
)
def fit(self, *args, **kwargs):
X, c = self.generate()
x = X[:,0]
y = X[:,1]
self.draw(x, y, c)
return self
def draw(self, x, y, c):
colors = resolve_colors(self.n_blobs)
for i in np.arange(self.n_blobs):
mask = c==i
label = "c{}".format(i)
self.ax.scatter(x[mask], y[mask], label=label, c=colors[i])
return self.ax
def finalize(self):
self.ax.legend(frameon=True)
self.ax.set_ylabel("$y$")
self.ax.set_xlabel("$x$")
self.ax.set_title("Random Scatter Plot")
return self.ax
if __name__ == '__main__':
r = RandomVisualizer()
r.fit()
r.poof(outpath='test.png')
| apache-2.0 |
mortada/tensorflow | tensorflow/examples/learn/boston.py | 33 | 1981 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = datasets.load_boston()
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Transform
x_transformed = scaler.transform(x_test)
# Predict and score
y_predicted = list(regressor.predict(x_transformed, as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
start-jsk/jsk_apc | jsk_apc2016_common/python/jsk_apc2016_common/rbo_segmentation/evaluate.py | 1 | 7455 | from apc_data import APCDataSet, APCSample
from probabilistic_segmentation import ProbabilisticSegmentationRF, ProbabilisticSegmentationBP
import pickle
import os
import matplotlib.pyplot as plt
import numpy as np
import copy
import rospkg
def _fast_hist(a, b, n):
k = (a >= 0) & (a < n)
hist = np.bincount(n * a[k].astype(int) +
b[k], minlength=n**2).reshape(n, n)
return hist
def label_accuracy_score(label_true, label_pred, n_class):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = _fast_hist(label_true.flatten(), label_pred.flatten(), n_class)
acc = np.diag(hist).sum() / hist.sum().astype(np.float64)
acc_cls = np.diag(hist) / hist.sum(axis=1).astype(np.float64)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)).astype(np.float64)
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum().astype(np.float64)
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc
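# Minimal sanity-check sketch (made-up 2x2 label images, 3 classes; not part
# of the evaluation pipeline below):
#   >>> lt = np.array([[0, 1], [2, 2]])
#   >>> lp = np.array([[0, 1], [2, 1]])
#   >>> acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lt, lp, 3)
#   >>> round(acc, 2), round(mean_iu, 2)
#   (0.75, 0.67)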
# previously declared in main.py
def combine_datasets(datasets):
samples = []
for d in datasets:
samples += d.samples
return APCDataSet(samples=samples)
def load_datasets(dataset_names, data_path, cache_path):
datasets = dict()
for dataset_name in dataset_names:
dataset_path = os.path.join(
data_path, 'rbo_apc/{}'.format(dataset_name))
datasets[dataset_name] = APCDataSet(
name=dataset_name, dataset_path=dataset_path,
cache_path=cache_path, load_from_cache=True)
return datasets
def evaluate(bp, test_data):
acc_list = []
acc_cls_list = []
mean_iu_list = []
fwavacc_list = []
for sample in test_data.samples:
if len(sample.object_masks) == 0:
continue
pred_target = sample.object_masks.keys()[0]
if pred_target == 'shelf':
if len(sample.object_masks.keys()) == 1:
continue
pred_target = sample.object_masks.keys()[1]
bp.predict(sample, pred_target)
print 'done'
images = []
images.append(bp.posterior_images_smooth['shelf'])
objects = []
objects.append('shelf')
for _object in bp.posterior_images_smooth.keys():
if _object != 'shelf':
images.append(bp.posterior_images_smooth[_object])
objects.append(_object)
pred = np.argmax(np.array(images), axis=0)
# skip samples whose predicted objects and labeled object masks do not match
objects_copy = copy.copy(objects)
object_masks_keys = sample.object_masks.keys()
if 'shelf' in objects_copy: objects_copy.remove('shelf')
if 'shelf' in object_masks_keys: object_masks_keys.remove('shelf')
if set(objects_copy) != set(object_masks_keys):
#print 'skip posterior_image keys ', objects_copy
#print 'skip object_mask keys ', object_masks_keys
continue
true = np.zeros_like(pred)
for i, _object in enumerate(objects):
if _object != 'shelf':
true[sample.object_masks[_object]] = i
masked_pred = pred[sample.bin_mask]
masked_true = true[sample.bin_mask]
acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(masked_true, masked_pred, len(objects))
acc_list.append(acc)
acc_cls_list.append(acc_cls)
mean_iu_list.append(mean_iu)
fwavacc_list.append(fwavacc)
"""
label_pred = np.zeros(pred.shape[1:]).astype(np.int64)
label_true = np.zeros(pred.shape[1:]).astype(np.int64)
for i in range(pred.shape[0]):
label_pred[pred[i]] = i
label_true[true[i]] = i
label_pred_masked = label_pred[sample.bin_mask]
label_true_masked = label_true[sample.bin_mask]
"""
return acc_list, acc_cls_list, mean_iu_list, fwavacc_list
def create_dataset(dataset_path):
# initialize empty dataset
dataset = APCDataSet(from_pkl=False)
data_file_prefixes = []
key = '.jpg'
for dir_name, sub_dirs, files in os.walk(dataset_path):
for f in files:
if key == f[-len(key):]:
data_file_prefixes.append(
os.path.join(dir_name, f[:-len(key)]))
print data_file_prefixes
for file_prefix in data_file_prefixes:
dataset.samples.append(
APCSample(data_2016_prefix=file_prefix,
labeled=True, is_2016=True, infer_shelf_mask=True))
return dataset
###############################################################################
# prepare dataset #
###############################################################################
#data_path = '/home/leus/ros/indigo/src/start-jsk/jsk_apc/jsk_apc2016_common/data'
#cache_path = os.path.join(data_path, 'cache')
#dataset_path = os.path.join(data_path, 'rbo_apc')
rospack = rospkg.RosPack()
common_path = rospack.get_path('jsk_apc2016_common')
data_path = common_path + '/data/'
dataset_name = 'tokyo_run/single_item_labeled'
dataset_path = os.path.join(data_path, dataset_name)
data = create_dataset(dataset_path)
###############################################################################
# dataset #
###############################################################################
train_data, test_data = data.split_simple(portion_training=0.7)
###############################################################################
# all features #
###############################################################################
all_features = ['color', 'height3D', 'dist2shelf']
params = {
'use_features': all_features,
'segmentation_method': "max_smooth", 'selection_method': "max_smooth",
'make_convex': True, 'do_shrinking_resegmentation': True,
'do_greedy_resegmentation': True}
bp = ProbabilisticSegmentationBP(**params)
bp.fit(train_data)
acc_list, acc_cls_list, mean_iu_list, fwavacc_list = evaluate(bp, test_data)
print 'all features acc ', np.mean(acc_list)
print 'all features acc_cls ', np.mean(acc_cls_list)
print 'all features mean_iu ', np.mean(mean_iu_list)
print 'all features fwavacc ', np.mean(fwavacc_list)
###############################################################################
# # Color only #
###############################################################################
params = {
'use_features': ['color'],
'segmentation_method': "max_smooth", 'selection_method': "max_smooth",
'make_convex': True, 'do_shrinking_resegmentation': True,
'do_greedy_resegmentation': True}
bp = ProbabilisticSegmentationBP(**params)
bp.fit(train_data)
acc_list, acc_cls_list, mean_iu_list, fwavacc_list = evaluate(bp, test_data)
print 'trained only by color features acc ', np.mean(acc_list)
print 'trained only by color features acc_cls ', np.mean(acc_cls_list)
print 'trained only by color features mean_iu ', np.mean(mean_iu_list)
print 'trained only by color features fwavacc ', np.mean(fwavacc_list)
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/svm/tests/test_sparse.py | 35 | 13182 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
licco/zipline | zipline/history/history_container.py | 1 | 18509 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import groupby
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import (
index_at_dt,
)
from zipline.utils.data import RollingPanel
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(field,
buffer_frame,
digest_frame,
pre_digest_values):
"""
Forward-fill a buffer frame, falling back to the end-of-period values of a
digest frame if the buffer frame has leading NaNs.
"""
# Get values which are NaN at the beginning of the period.
first_bar = buffer_frame.iloc[0]
def iter_nan_sids():
"""
Helper for iterating over the remaining nan sids in first_bar.
"""
return (sid for sid in first_bar[first_bar.isnull()].index)
# Try to fill with the last entry from the digest frame.
if digest_frame is not None:
# We don't store a digest frame for frequencies that only have a bar
# count of 1.
for sid in iter_nan_sids():
buffer_frame[sid][0] = digest_frame.ix[-1, sid]
# If we still have nan sids, try to fill with pre_digest_values.
for sid in iter_nan_sids():
prior_sid_value = pre_digest_values[field].get(sid)
if prior_sid_value:
# If the prior value is greater than the timestamp of our first
# bar.
if prior_sid_value.get('dt', first_bar.name) > first_bar.name:
buffer_frame[sid][0] = prior_sid_value.get('value', np.nan)
return buffer_frame.ffill()
def ffill_digest_frame_from_prior_values(field, digest_frame, prior_values):
"""
Forward-fill a digest frame, falling back to the last known prior values if
necessary.
"""
if digest_frame is not None:
# Digest frame is None in the case that we only have length 1 history
# specs for a given frequency.
# It's possible that the first bar in our digest frame is storing NaN
# values. If so, check if we've tracked an older value and use that as
# an ffill value for the first bar.
first_bar = digest_frame.ix[0]
nan_sids = first_bar[first_bar.isnull()].index
for sid in nan_sids:
try:
# Only use prior value if it is before the index,
# so that a backfill does not accidentally occur.
if prior_values[field][sid]['dt'] <= digest_frame.index[0]:
digest_frame[sid][0] = prior_values[field][sid]['value']
except KeyError:
# Allow case where there is no previous value.
# e.g. with leading nans.
pass
digest_frame = digest_frame.ffill()
return digest_frame
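# Illustrative note on the fallback data used by the two helpers above:
# ``pre_digest_values`` / ``prior_values`` is a nested dict keyed by field
# and then by sid, whose leaves record the dt and value of the last bar
# seen for that sid (numbers here are made up), e.g.
#   {'price': {24: {'dt': <Timestamp of the last digest>, 'value': 10.5}}}
# It is populated by HistoryContainer.roll() below via
# ``self.last_known_prior_values``.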
def freq_str_and_bar_count(history_spec):
"""
Helper for getting the frequency string and bar count from a history spec.
"""
return (history_spec.frequency.freq_str, history_spec.bar_count)
def group_by_frequency(history_specs):
"""
Takes an iterable of history specs and returns a dictionary mapping unique
frequencies to a list of specs with that frequency.
Within each list, the HistorySpecs are sorted by ascending bar count.
Example:
[HistorySpec(3, '1d', 'price', True),
HistorySpec(2, '2d', 'open', True),
HistorySpec(2, '1d', 'open', False),
HistorySpec(5, '1m', 'open', True)]
yields
{Frequency('1d') : [HistorySpec(2, '1d', 'open', False)],
HistorySpec(3, '1d', 'price', True),
Frequency('2d') : [HistorySpec(2, '2d', 'open', True)],
Frequency('1m') : [HistorySpec(5, '1m', 'open', True)]}
"""
return {key: list(group)
for key, group in groupby(
sorted(history_specs, key=freq_str_and_bar_count),
key=lambda spec: spec.frequency)}
class HistoryContainer(object):
"""
Container for all history panels and frames used by an algoscript.
To be used internally by TradingAlgorithm, but *not* passed directly to the
algorithm.
Entry point for the algoscript is the result of `get_history`.
"""
def __init__(self, history_specs, initial_sids, initial_dt):
# History specs to be served by this container.
self.history_specs = history_specs
self.frequency_groups = \
group_by_frequency(itervalues(self.history_specs))
# The set of fields specified by all history specs
self.fields = set(spec.field for spec in itervalues(history_specs))
# This panel contains raw minutes for periods that haven't been fully
# completed. When a frequency period rolls over, these minutes are
# digested using some sort of aggregation call on the panel (e.g. `sum`
# for volume, `max` for high, `min` for low, etc.).
self.buffer_panel = self.create_buffer_panel(
initial_sids,
initial_dt,
)
# Dictionaries with Frequency objects as keys.
self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
self.create_digest_panels(initial_sids, initial_dt)
# Populating initial frames here, so that the cost of creating the
# initial frames does not show up when profiling. These frames are
# cached since mid-stream creation of containing data frames on every
# bar is expensive.
self.create_return_frames(initial_dt)
# Tracks the last known value (and its dt) per field/sid so the prior day
# panel is not left with a NaN for data that has already been seen.
self.last_known_prior_values = {field: {} for field in self.fields}
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.frequency_groups)
def create_digest_panels(self, initial_sids, initial_dt):
"""
Initialize a RollingPanel for each unique panel frequency being stored
by this container. Each RollingPanel pre-allocates enough storage
space to service the highest bar-count of any history call that it
serves.
Relies on the fact that group_by_frequency sorts the value lists by
ascending bar count.
"""
# Map from frequency -> first/last minute of the next digest to be
# rolled for that frequency.
first_window_starts = {}
first_window_closes = {}
# Map from frequency -> digest_panels.
panels = {}
for freq, specs in iteritems(self.frequency_groups):
# Relying on the sorting of group_by_frequency to get the spec
# requiring the largest number of bars.
largest_spec = specs[-1]
if largest_spec.bar_count == 1:
# No need to allocate a digest panel; this frequency will only
# ever use data drawn from self.buffer_panel.
first_window_starts[freq] = freq.window_open(initial_dt)
first_window_closes[freq] = freq.window_close(
first_window_starts[freq]
)
continue
initial_dates = index_at_dt(largest_spec, initial_dt)
# Set up dates for our first digest roll, which is keyed to the
# close of the first entry in our initial index.
first_window_closes[freq] = initial_dates[0]
first_window_starts[freq] = freq.window_open(initial_dates[0])
rp = RollingPanel(len(initial_dates) - 1,
self.fields,
initial_sids)
panels[freq] = rp
return panels, first_window_starts, first_window_closes
def create_buffer_panel(self, initial_sids, initial_dt):
"""
Initialize a RollingPanel containing enough minutes to service all our
frequencies.
"""
max_bars_needed = max(freq.max_minutes
for freq in self.unique_frequencies)
rp = RollingPanel(
max_bars_needed,
self.fields,
initial_sids,
# Restrict the initial data down to just the fields being used in
# this container.
)
return rp
def convert_columns(self, values):
"""
If columns have a specific type you want to enforce, overwrite this
method and return the transformed values.
"""
return values
def create_return_frames(self, algo_dt):
"""
Populates the return frame cache.
Called during init and at universe rollovers.
"""
self.return_frames = {}
for spec_key, history_spec in iteritems(self.history_specs):
index = pd.to_datetime(index_at_dt(history_spec, algo_dt))
frame = pd.DataFrame(
index=index,
columns=self.convert_columns(
self.buffer_panel.minor_axis.values),
dtype=np.float64)
self.return_frames[spec_key] = frame
def buffer_panel_minutes(self,
buffer_panel=None,
earliest_minute=None,
latest_minute=None):
"""
Get the minutes in @buffer_panel between @earliest_minute and
@latest_minute, inclusive.
@buffer_panel can be a RollingPanel or a plain Panel. If a
RollingPanel is supplied, we call `get_current` to extract a Panel
object. If no panel is supplied, we use self.buffer_panel.
If no value is specified for @earliest_minute, use all the minutes we
have up until @latest_minute.
If no value for @latest_minute is specified, use all values up until
the latest minute.
"""
buffer_panel = buffer_panel or self.buffer_panel
if isinstance(buffer_panel, RollingPanel):
buffer_panel = buffer_panel.get_current()
return buffer_panel.ix[:, earliest_minute:latest_minute, :]
def update(self, data, algo_dt):
"""
Takes the bar at @algo_dt's @data, checks to see if we need to roll any
new digests, then adds new data to the buffer panel.
"""
self.update_digest_panels(algo_dt, self.buffer_panel)
fields = self.fields
frame = pd.DataFrame(
{sid: {field: bar[field] for field in fields}
for sid, bar in data.iteritems()
if (bar
and
bar['dt'] == algo_dt
and
# Only use data which is keyed in the data panel.
# Prevents crashes due to custom data.
sid in self.buffer_panel.minor_axis)})
self.buffer_panel.add_frame(algo_dt, frame)
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
"""
Check whether @algo_dt is greater than cur_window_close for any of our
frequencies. If so, roll a digest for that frequency using data drawn
from @buffer panel and insert it into the appropriate digest panels.
If @freq_filter is specified, only use the given data to update
frequencies on which the filter returns True.
"""
for frequency in self.unique_frequencies:
if freq_filter is not None and not freq_filter(frequency):
continue
# We don't keep a digest panel if we only have a length-1 history
# spec for a given frequency
digest_panel = self.digest_panels.get(frequency, None)
while algo_dt > self.cur_window_closes[frequency]:
earliest_minute = self.cur_window_starts[frequency]
latest_minute = self.cur_window_closes[frequency]
minutes_to_process = self.buffer_panel_minutes(
buffer_panel,
earliest_minute=earliest_minute,
latest_minute=latest_minute,
)
# Create a digest from minutes_to_process and add it to
# digest_panel.
self.roll(frequency,
digest_panel,
minutes_to_process,
latest_minute)
# Update panel start/close for this frequency.
self.cur_window_starts[frequency] = \
frequency.next_window_start(latest_minute)
self.cur_window_closes[frequency] = \
frequency.window_close(self.cur_window_starts[frequency])
def roll(self, frequency, digest_panel, buffer_minutes, digest_dt):
"""
Package up the minutes in @buffer_minutes, insert that bar into
@digest_panel at index @digest_dt, and update
self.cur_window_{starts|closes} for the given frequency.
"""
if digest_panel is None:
# This happens if the only spec we have at this frequency has a bar
# count of 1.
return
rolled = pd.DataFrame(
index=self.fields,
columns=buffer_minutes.minor_axis)
for field in self.fields:
if field in CLOSING_PRICE_FIELDS:
# Use the last close, or NaN if we have no minutes.
try:
prices = buffer_minutes.loc[field].ffill().iloc[-1]
except IndexError:
# Scalar assignment sets the value for all entries.
prices = np.nan
rolled.ix[field] = prices
elif field == 'open_price':
# Use the first open, or NaN if we have no minutes.
try:
opens = buffer_minutes.loc[field].bfill().iloc[0]
except IndexError:
# Scalar assignment sets the value for all entries.
opens = np.nan
rolled.ix['open_price'] = opens
elif field == 'volume':
# Volume is the sum of the volumes during the
# course of the period.
volumes = buffer_minutes.ix['volume'].sum().fillna(0)
rolled.ix['volume'] = volumes
elif field == 'high':
# Use the highest high.
highs = buffer_minutes.ix['high'].max()
rolled.ix['high'] = highs
elif field == 'low':
# Use the lowest low.
lows = buffer_minutes.ix['low'].min()
rolled.ix['low'] = lows
for sid, value in rolled.ix[field].iterkv():
if not np.isnan(value):
try:
prior_values = \
self.last_known_prior_values[field][sid]
except KeyError:
prior_values = {}
self.last_known_prior_values[field][sid] = \
prior_values
prior_values['dt'] = digest_dt
prior_values['value'] = value
digest_panel.add_frame(digest_dt, rolled)
def get_history(self, history_spec, algo_dt):
"""
Main API used by the algoscript is mapped to this function.
Selects from the overarching history panel the values for the
@history_spec at the given @algo_dt.
"""
field = history_spec.field
bar_count = history_spec.bar_count
do_ffill = history_spec.ffill
index = pd.to_datetime(index_at_dt(history_spec, algo_dt))
return_frame = self.return_frames[history_spec.key_str]
# Overwrite the index.
# Not worrying about values here since the values are overwritten
# in the next step.
return_frame.index = index
if bar_count > 1:
# Get the last bar_count - 1 frames from our stored historical
# frames.
digest_panel = self.digest_panels[history_spec.frequency]\
.get_current()
digest_frame = digest_panel[field].copy().ix[1 - bar_count:]
else:
digest_frame = None
# Get minutes from our buffer panel to build the last row.
buffer_frame = self.buffer_panel_minutes(
earliest_minute=self.cur_window_starts[history_spec.frequency],
)[field]
if do_ffill:
digest_frame = ffill_digest_frame_from_prior_values(
field,
digest_frame,
self.last_known_prior_values,
)
buffer_frame = ffill_buffer_from_prior_values(
field,
buffer_frame,
digest_frame,
self.last_known_prior_values,
)
if digest_frame is not None:
return_frame.ix[:-1] = digest_frame.ix[:]
if field == 'volume':
return_frame.ix[algo_dt] = buffer_frame.fillna(0).sum()
elif field == 'high':
return_frame.ix[algo_dt] = buffer_frame.max()
elif field == 'low':
return_frame.ix[algo_dt] = buffer_frame.min()
elif field == 'open_price':
return_frame.ix[algo_dt] = buffer_frame.iloc[0]
else:
return_frame.ix[algo_dt] = buffer_frame.loc[algo_dt]
# Returning a copy of the DataFrame so that we don't crash if the user
# adds columns to the frame. Ideally we would just drop any added
# columns, but pandas 0.12.0 doesn't support in-place dropping of
# columns. We should re-evaluate this implementation once we're on a
# more up-to-date pandas.
return return_frame.copy()
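# Illustrative note: the frame returned above is indexed by the bar_count
# timestamps implied by the spec's frequency (oldest bar first) with one
# column per sid; every row but the last comes from the digest panel, and
# the final row at ``algo_dt`` is built from the still-open buffer minutes.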
| apache-2.0 |
JeanKossaifi/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
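# Illustrative extra check (not part of the original test module): with no
# function supplied, FunctionTransformer is assumed to default to the
# identity transform.
def test_identity_default():
    X = np.arange(10).reshape((5, 2))
    np.testing.assert_array_equal(
        FunctionTransformer().transform(X),
        X,
        'default FunctionTransformer should behave as the identity',
    )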
| bsd-3-clause |
shashankrajput/seq2seq | seq2seq/tasks/dump_attention.py | 6 | 4850 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task that dumps attention scores (and optionally attention plots) during inference.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.decode_text import _get_prediction_length
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_scores(predictions_dict):
"""Returns the attention scores, sliced by source and target length.
"""
prediction_len = _get_prediction_length(predictions_dict)
source_len = predictions_dict["features.source_len"]
return predictions_dict["attention_scores"][:prediction_len, :source_len]
def _create_figure(predictions_dict):
"""Creates and returns a new figure that visualizes
  attention scores for a single model's predictions.
"""
# Find out how long the predicted sequence is
target_words = list(predictions_dict["predicted_tokens"])
prediction_len = _get_prediction_length(predictions_dict)
# Get source words
source_len = predictions_dict["features.source_len"]
source_words = predictions_dict["features.source_tokens"][:source_len]
# Plot
fig = plt.figure(figsize=(8, 8))
plt.imshow(
X=predictions_dict["attention_scores"][:prediction_len, :source_len],
interpolation="nearest",
cmap=plt.cm.Blues)
plt.xticks(np.arange(source_len), source_words, rotation=45)
plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
fig.tight_layout()
return fig
class DumpAttention(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DumpAttention, self).__init__(params)
self._attention_scores_accum = []
self._idx = 0
if not self.params["output_dir"]:
raise ValueError("Must specify output_dir for DumpAttention")
@staticmethod
def default_params():
params = {}
params.update({"output_dir": "", "dump_plots": True})
return params
def begin(self):
super(DumpAttention, self).begin()
gfile.MakeDirs(self.params["output_dir"])
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
if self.params["dump_plots"]:
output_path = os.path.join(self.params["output_dir"],
"{:05d}.png".format(self._idx))
_create_figure(fetches)
plt.savefig(output_path)
plt.close()
tf.logging.info("Wrote %s", output_path)
self._idx += 1
self._attention_scores_accum.append(_get_scores(fetches))
def end(self, _session):
scores_path = os.path.join(self.params["output_dir"],
"attention_scores.npz")
np.savez(scores_path, *self._attention_scores_accum)
tf.logging.info("Wrote %s", scores_path)
| apache-2.0 |
ldirer/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
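# For reference (illustrative note, not in the original example): each 8x8
# digit gives 64 features, so X has shape (n_samples, 64), X_reduced has
# shape (n_samples, 32) after agglomeration, and inverse_transform maps the
# 32 cluster values back onto the 64-pixel layout for display.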
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
souljourner/fab | EDA/FOMC.py | 2 | 5156 | from __future__ import print_function
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import pandas as pd
import pickle
import threading
import sys
class FOMC (object):
'''
    A convenient class for extracting FOMC statements from the Federal Reserve website
Example Usage:
fomc = FOMC()
df = fomc.get_statements()
fomc.pickle("./df_minutes.pickle")
'''
def __init__(self, base_url='https://www.federalreserve.gov',
calendar_url='https://www.federalreserve.gov/monetarypolicy/fomccalendars.htm',
historical_date = 2011,
verbose = True,
max_threads = 10):
self.base_url = base_url
self.calendar_url = calendar_url
self.df = None
self.links = None
self.dates = None
self.articles = None
self.verbose = verbose
self.HISTORICAL_DATE = historical_date
self.MAX_THREADS = max_threads
def _get_links(self, from_year):
'''
private function that sets all the links for the FOMC meetings from the giving from_year
to the current most recent year
'''
if self.verbose:
print("Getting links...")
self.links = []
fomc_meetings_socket = urlopen(self.calendar_url)
soup = BeautifulSoup(fomc_meetings_socket, 'html.parser')
statements = soup.find_all('a', href=re.compile('^/newsevents/pressreleases/monetary\d{8}a.htm'))
self.links = [statement.attrs['href'] for statement in statements]
if from_year <= self.HISTORICAL_DATE:
for year in range(from_year, self.HISTORICAL_DATE + 1):
fomc_yearly_url = self.base_url + '/monetarypolicy/fomchistorical' + str(year) + '.htm'
fomc_yearly_socket = urlopen(fomc_yearly_url)
soup_yearly = BeautifulSoup(fomc_yearly_socket, 'html.parser')
statements_historical = soup_yearly.findAll('a', text = 'Statement')
for statement_historical in statements_historical:
self.links.append(statement_historical.attrs['href'])
def _date_from_link(self, link):
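        # e.g. '/newsevents/pressreleases/monetary20150128a.htm' -> '2015/1/28'
        # (a leading zero in the month is dropped by the branch below).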
date = re.findall('[0-9]{8}', link)[0]
if date[4] == '0':
date = "{}/{}/{}".format(date[:4], date[5:6], date[6:])
else:
date = "{}/{}/{}".format(date[:4], date[4:6], date[6:])
return date
def _add_article(self, link, index=None):
'''
        adds the related article for one link into the instance variables;
        index is the position in the articles list to write to. Due to concurrent
        processing, we need to make sure the articles are stored in the
right order
'''
if self.verbose:
sys.stdout.write(".")
sys.stdout.flush()
# date of the article content
self.dates.append(self._date_from_link(link))
statement_socket = urlopen(self.base_url + link)
statement = BeautifulSoup(statement_socket, 'html.parser')
paragraphs = statement.findAll('p')
self.articles[index]= "\n\n".join([paragraph.get_text().strip() for paragraph in paragraphs])
def _get_articles_multi_threaded(self):
'''
gets all articles using multi-threading
'''
if self.verbose:
print("Getting articles - Multi-threaded...")
self.dates, self.articles = [], ['']*len(self.links)
jobs = []
# initiate and start threads:
index = 0
while index < len(self.links):
if len(jobs) < self.MAX_THREADS:
t = threading.Thread(target=self._add_article, args=(self.links[index],index,))
jobs.append(t)
t.start()
index += 1
else: # wait for threads to complete and join them back into the main thread
t = jobs.pop(0)
t.join()
for t in jobs:
t.join()
for row in range(len(self.articles)):
self.articles[row] = self.articles[row].strip()
def get_statements(self, from_year=1994):
'''
        Returns a Pandas DataFrame of FOMC statements with the date as the index
uses a date range of from_year to the most current
Input from_year is ignored if it is within the last 5 years as this is meant for
parsing much older years
'''
self._get_links(from_year)
print("There are", len(self.links), 'statements')
self._get_articles_multi_threaded()
self.df = pd.DataFrame(self.articles, index = pd.to_datetime(self.dates)).sort_index()
self.df.columns = ['statements']
return self.df
def pick_df(self, filename="../data/minutes.pickle"):
if filename:
if self.verbose:
print("Writing to", filename)
with open(filename, "wb") as output_file:
pickle.dump(self.df, output_file)
if __name__ == '__main__':
#Example Usage
fomc = FOMC()
df = fomc.get_statements()
    fomc.pick_df("./df_minutes.pickle")
| mit |
gmartinvela/Incubator | Incubator/mongo_save.py | 1 | 2777 | from pymongo import MongoClient
import urllib2
import time
import datetime
import json
import sqlite3
import pandas as pd
import pandas.io.sql as psql
from data_utils import retrieve_DBs, extract_data_from_DB
mongo_client = MongoClient()
mongo_db = mongo_client.incubator
measures_collection = mongo_db.measures
local_path_SHT1xdb = "/home/weblord/Desktop/Incubator/Incubator/static/data/SHT1x.db"
SQL_execute_SHT1xdb = "select max(date), humi from READ"
index_SHT1xdb = "date"
SQL_remove_last_SHT1xdb = "select date, humi from READ"
SHT1x = [local_path_SHT1xdb, SQL_execute_SHT1xdb, index_SHT1xdb, SQL_remove_last_SHT1xdb]
local_path_thermodb = "/home/weblord/Desktop/Incubator/Incubator/static/data/thermo.db"
SQL_execute_thermodb = "select max(DATE_LOG), TEMP_LOG from LOG"
index_thermodb = "DATE_LOG"
SQL_remove_last_thermodb = "select DATE_LOG, TEMP_LOG from LOG"
THERMO = [local_path_thermodb, SQL_execute_thermodb, index_thermodb, SQL_remove_last_thermodb]
DBs = [SHT1x, THERMO]
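# Each DB spec above is [sqlite file path, latest-row query, index column name,
# full-table query]; retrieve_row_from_DBs() below uses the path (DB[0]), the
# index column (DB[2]) and the full-table query (DB[3]).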
retrieve_DBs()
dataframes_sqlite = []
all_DBs_list = []
now = datetime.datetime.utcnow()
now_without_seconds = now.strftime("%Y-%m-%d %H:%M")
print "NOW:",now_without_seconds
URL = 'http://localhost:8008/measures'
data_lost = []
def retrieve_row_from_DBs(DBs, rows):
    for DB in DBs:
        with sqlite3.connect(DB[0], detect_types=sqlite3.PARSE_DECLTYPES) as conn:
            all_db = psql.frame_query(DB[3], con=conn)
            all_db.index = pd.to_datetime(all_db.pop(DB[2]))
            # TODO: This is an approximation. We need data every 15 seconds minimum. In these moments SHT1x go 1:13 seconds
            all_db = all_db.resample('15S', fill_method='bfill')
            all_DBs_list.append(all_db)
    concatenated_db = pd.concat([all_DBs_list[0], all_DBs_list[1]], axis=1)
    concatenated_db_filled = concatenated_db.fillna(method='ffill')
    print "HUMI: %.2f" % concatenated_db_filled.humi.iloc[0]
    print "TEMP: %.2f" % concatenated_db_filled.TEMP_LOG.iloc[0]
# Remove this row
def request_without_proxy(URL):
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
request = urllib2.Request(URL)
request_data = opener.open(request).read()
return request_data
def save_in_mongo():
print "Saving all the data to mongodb"
while(1):
# if data_lost:
# try:
# retrieve_DBs()
# retrieve_rows_from_DBS(DBs, len(data_lost))
# except:
# print "Impossible to retrive DB. Fix the problems in the net!"
# time.sleep(10)
# else:
# time.sleep(15)
time.sleep(15)
try:
data = request_without_proxy(URL)
json_data = json.loads(data)
measure = {
'date': datetime.datetime.utcnow(),
'humi': json_data['HUMI'],
'temp': json_data['TEMP']
}
measure_id = measures_collection.insert(measure)
except:
data_lost.append(datetime.datetime.utcnow())
#print measure_id | mit |
chrisburr/scikit-learn | sklearn/metrics/ranking.py | 17 | 26927 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
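# Worked example (illustrative, using the same data as the doctests elsewhere
# in this module): for y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8],
# the scores sorted in decreasing order are [0.8, 0.4, 0.35, 0.1] with labels
# [1, 0, 1, 0], so _binary_clf_curve returns fps = [0, 1, 1, 2],
# tps = [1, 1, 2, 2] and thresholds = [0.8, 0.4, 0.35, 0.1].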
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
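# Worked example (illustrative): with y_true = [[1, 0, 0], [0, 0, 1]] and
# y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]], covering the true label of the
# first sample requires the top 2 scores and the second sample requires all 3,
# so coverage_error is (2 + 3) / 2 = 2.5.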
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
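# Worked example (illustrative): with y_true = [[1, 0, 0], [0, 0, 1]] and
# y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]], the first sample mis-orders 1 of
# its 2 (relevant, irrelevant) pairs (0.5) and the second mis-orders both of
# its pairs (1.0), giving a label ranking loss of (0.5 + 1.0) / 2 = 0.75.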
| bsd-3-clause |
ZENGXH/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/subplots_axes_and_figures/custom_figure_class.py | 1 | 1517 | """
===================
Custom Figure Class
===================
You can pass a custom Figure constructor to figure if you want to derive from
the default Figure. This simple example creates a figure with a figure title.
"""
import matplotlib.pyplot as plt #import figure, show
from matplotlib.figure import Figure
# nodebox section
if __name__ == '__builtin__':
    # we're in NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
class MyFigure(Figure):
def __init__(self, *args, **kwargs):
"""
custom kwarg figtitle is a figure title
"""
figtitle = kwargs.pop('figtitle', 'hi mom')
Figure.__init__(self, *args, **kwargs)
self.text(0.5, 0.95, figtitle, ha='center')
fig = plt.figure(FigureClass=MyFigure, figtitle='my title')
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
pltshow(plt)
| mit |
hanteng/babel | scripts/geoname_cldr.py | 1 | 2479 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Discrimination knows no bounds; turn back and reach the shore. Keys rise and fall; feelings real and illusory.
# url_target="https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv"
import csv
import pandas as pd
import codecs
def export_to_csv(df, ex_filename, sep=','):
if sep==',':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_ALL, na_rep='{na}', encoding='utf-8') #+'.csv'
if sep=='\t':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_NONE, na_rep='{na}', encoding='utf-8') #+'.tsv' , escapechar="'", quotechar=""
def import_from_babel_cldr():
from babel import Locale
    # starting from en-US to retrieve the complete set of keys
locale = Locale('en', 'US')
completelist_territories = locale.territories.keys()
completelist_languages = locale.languages.keys()
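    # locale.territories maps CLDR territory codes to display names in that
    # locale, e.g. locale.territories['US'] == 'United States' for the English
    # locale (illustrative; exact strings depend on the CLDR version shipped
    # with Babel).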
    # initiate the output dataframe from this
df_cldr=pd.DataFrame.from_dict(locale.territories, orient="index")
df_cldr.index.name='geocode'
df_cldr.columns = ['name_en']
df_cldr.sort_index(inplace=True)
for i_lang in completelist_languages:
#print(i_lang)
try:
locale = Locale.parse(i_lang)
df=pd.DataFrame.from_dict(locale.territories, orient="index")
df.columns = ['name_{0}'.format(i_lang)]
df.sort_index(inplace=True)
df_cldr=df_cldr.join(df)
except:
pass
return df_cldr
###################### MAIN ########################
import os
path_script=os.path.dirname(os.path.abspath(__file__))
#print path_script
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Fetch and generate the country and territory names in languages that are supported by the Unicode CLDR 25.""")
parser.add_argument("-o", "--output", dest="outputpath", default="geoname_CLDR25_babel.csv",
help="write data to a csv file or a tsv file", metavar="OUTPUTPATH")
args = parser.parse_args()
fn = args.outputpath
#print fn
df_cldr=import_from_babel_cldr()
if fn[-3:]=='csv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep=',')
elif fn[-3:]=='tsv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep='\t')
else:
print ("Only csv and tsv formats can be generated. Sorry.")
| bsd-3-clause |
kdebrab/pandas | pandas/tests/indexes/multi/test_set_ops.py | 2 | 8078 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (CategoricalIndex, DatetimeIndex, MultiIndex, PeriodIndex,
Series, TimedeltaIndex)
def test_setops_errorcases(idx):
# # non-iterable input
cases = [0.5, 'xxx']
methods = [idx.intersection, idx.union, idx.difference,
idx.symmetric_difference]
for method in methods:
for case in cases:
tm.assert_raises_regex(TypeError,
"Input must be Index "
"or array-like",
method, case)
def test_intersection_base(idx):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.intersection([1, 2, 3])
def test_union_base(idx):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.union(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.union(case)
assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.union([1, 2, 3])
def test_difference_base(idx):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.difference(case)
elif isinstance(idx, CategoricalIndex):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
tm.assert_numpy_array_equal(result.sort_values().asi8,
answer.sort_values().asi8)
else:
result = first.difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.difference([1, 2, 3])
def test_symmetric_difference(idx):
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
pass
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.symmetric_difference(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
first.symmetric_difference([1, 2, 3])
def test_empty(idx):
# GH 15270
assert not idx.empty
assert idx[:0].empty
def test_difference(idx):
first = idx
result = first.difference(idx[-3:])
expected = MultiIndex.from_tuples(sorted(idx[:-3].values),
sortorder=0,
names=idx.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == idx.names
# empty difference: reflexive
result = idx.difference(idx)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: superset
result = idx[-3:].difference(idx)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: degenerate
result = idx[:0].difference(idx)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# names not the same
chunklet = idx[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = idx.difference(idx.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_union(idx):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_union = piece1 | piece2
tups = sorted(idx.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = idx.union(idx)
assert the_union is idx
the_union = idx.union(idx[:0])
assert the_union is idx
# won't work in python 3
# tuples = _index.values
# result = _index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(idx)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = _index.union(other)
# assert result.equals(result2)
def test_intersection(idx):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_int = piece1 & piece2
tups = sorted(idx[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = idx.intersection(idx)
assert the_int is idx
# empty intersection: disjoint
empty = idx[:2] & idx[2:]
expected = idx[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = _index.values
# result = _index & tuples
# assert result.equals(tuples)
| bsd-3-clause |
LiaoPan/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the predictions
# and their intervals
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval based on
# the two quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
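# Optional follow-up (illustrative, not part of the original example): the
# empirical coverage of the 90% interval can be checked on the training data.
# The quantile models are refit here because ``clf`` was last fit with
# loss='ls'.
#   clf_up = GradientBoostingRegressor(loss='quantile', alpha=0.95).fit(X, y)
#   clf_lo = GradientBoostingRegressor(loss='quantile', alpha=0.05).fit(X, y)
#   coverage = np.mean((y >= clf_lo.predict(X)) & (y <= clf_up.predict(X)))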
| bsd-3-clause |
atantet/ergoPack | example/numericalFP/numericalFP_Hopf.py | 1 | 5115 | import numpy as np
import pylibconfig2
from scipy import sparse
from scipy.sparse import linalg
import matplotlib.pyplot as plt
from matplotlib import cm
from ergoNumAna import ChangCooper
readEigVal = False
#readEigVal = True
def hopf(x, mu, omega):
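    # Normal form of the Hopf bifurcation:
    #   dx/dt = x * (mu - (x^2 + y^2)) - omega * y
    #   dy/dt = y * (mu - (x^2 + y^2)) + omega * x
    # For mu > 0 the deterministic system has a stable limit cycle of radius
    # sqrt(mu); for mu < 0 the origin is the only attractor.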
f = np.empty((2,))
f[0] = x[0] * (mu - (x[0]**2 + x[1]**2)) - omega*x[1]
f[1] = x[1] * (mu - (x[0]**2 + x[1]**2)) + omega*x[0]
return f
# Get model
omega = 1.
#q = 0.5
#q = 0.75
#q = 1.
#q = 1.25
#q = 1.5
#q = 1.75
#q = 2.
#q = 2.25
#q = 2.5
#q = 2.75
#q = 3.
#q = 3.25
#q = 3.5
#q = 3.75
q = 4.
muRng = np.arange(-10, 15., 0.1)
k0 = 0
#muRng = np.arange(6.6, 15., 0.1)
#k0 = 166
#muRng = np.arange(-4, 2, 0.1)
#k0 = 60
#muRng = np.arange(2, 8, 0.1)
#k0 = 120
#muRng = np.arange(8, 15, 0.1)
#k0 = 180
#muRng = np.arange(5., 10., 0.1)
#k0 = 150
#muRng = np.array([8.])
#k0 = 180
# Grid definition
dim = 2
nx0 = 100
#nx0 = 200
# give limits for the size of the periodic orbit
# at maximum value of control parameter (when noise
# effects transversally are small)
xlim = np.ones((dim,)) * np.sqrt(15) * 2
# Number of eigenvalues
nev = 100
tol = 1.e-6
B = np.eye(dim) * q
# Get standard deviations
Q = np.dot(B, B.T)
# Get grid points and steps
x = []
dx = np.empty((dim,))
nx = np.ones((dim,), dtype=int) * nx0
for d in np.arange(dim):
x.append(np.linspace(-xlim[d], xlim[d], nx[d]))
dx[d] = x[d][1] - x[d][0]
N = np.prod(nx)
idx = np.indices(nx).reshape(dim, -1)
X = np.meshgrid(*x, indexing='ij')
points = np.empty((dim, N))
for d in np.arange(dim):
points[d] = X[d].flatten()
alpha = 0.0
levels = 20
fs_default = 'x-large'
fs_latex = 'xx-large'
fs_xlabel = fs_default
fs_ylabel = fs_default
fs_xticklabels = fs_default
fs_yticklabels = fs_default
fs_legend_title = fs_default
fs_legend_labels = fs_default
fs_cbar_label = fs_default
#figFormat = 'png'
figFormat = 'eps'
dpi = 300
msize = 32
bbox_inches = 'tight'
plt.rc('font',**{'family':'serif'})
print 'For q = ', q
for k in np.arange(muRng.shape[0]):
mu = muRng[k]
print 'For mu = ', mu
if mu < 0:
signMu = 'm'
else:
signMu = 'p'
postfix = '_nx%d_k%03d_mu%s%02d_q%03d' \
% (nx0, k0 + k, signMu, int(round(np.abs(mu) * 10)), int(round(q * 100)))
if not readEigVal:
# Define drift
def drift(x):
return hopf(x, mu, omega)
# Get discretized Fokker-Planck operator
print 'Discretizing Fokker-Planck operator'
FPO = ChangCooper(points, nx, dx, drift, Q)
print 'Solving eigenvalue problem'
(w, v) = linalg.eigs(FPO, k=nev, which='LR', tol=tol)
isort = np.argsort(-w.real)
w = w[isort]
v = v[:, isort]
rho0 = v[:, 0].real
rho0 /= rho0.sum()
rho0_tile = np.tile(rho0, (dim, 1))
meanPoints = (points * rho0_tile).sum(1)
stdPoints = np.sqrt(((points - np.tile(meanPoints, (N, 1)).T)**2 * rho0_tile).sum(1))
print 'Mean points = ', meanPoints
print 'Std points = ', stdPoints
print 'Saving eigenvalues'
np.savetxt('../results/numericalFP/w_hopf%s.txt' % postfix, w)
np.savetxt('../results/numericalFP/statDist_hopf%s.txt' % postfix, rho0)
else:
print 'Reading eigenvalues'
srcFile = '../results/numericalFP/w_hopf%s.txt' % postfix
fp = open(srcFile, 'r')
w = np.empty((nev,), dtype=complex)
for ev in np.arange(nev):
line = fp.readline()
line = line.replace('+-', '-')
w[ev] = complex(line)
rho0 = np.loadtxt('../results/numericalFP/statDist_hopf%s.txt' % postfix)
print 'Plotting'
fig = plt.figure()
#fig.set_visible(False)
ax = fig.add_subplot(111)
ax.scatter(w.real, w.imag, edgecolors='face')
ax.set_xlim(-30, 0.1)
ax.set_ylim(-10, 10)
ax.text(-29, -9, r'$\mu = %.1f$' % mu, fontsize='xx-large')
fig.savefig('../results/plot/numericalFP/numFP_hopf%s.%s' \
% (postfix, figFormat), bbox_inches='tight', dpi=300)
fig = plt.figure()
ax = fig.add_subplot(111)
vect = rho0.copy()
vecAlpha = vect[vect != 0]
if alpha > 0:
vmax = np.sort(vecAlpha)[int((1. - alpha) \
* vecAlpha.shape[0])]
vect[vect > vmax] = vmax
else:
vmax = np.max(vect)
h = ax.contourf(X[0].T, X[1].T, vect.reshape(nx), levels,
cmap=cm.hot_r, vmin=0., vmax=vmax)
ax.set_xlim(X[0][:, 0].min(), X[0][:, 0].max())
ax.set_ylim(X[1][0].min(), X[1][0].max())
#cbar = plt.colorbar(h)
ax.set_xlabel(r'$x$', fontsize=fs_latex)
ax.set_ylabel(r'$y$', fontsize=fs_latex)
# plt.setp(cbar.ax.get_yticklabels(), fontsize=fs_yticklabels)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
ax.text(-7, -7, r'$\mu = %.1f$' % mu, fontsize='xx-large')
fig.savefig('../results/plot/numericalFP/statDist_hopf%s.%s' \
% (postfix, figFormat), bbox_inches='tight', dpi=300)
plt.close()
| gpl-3.0 |